repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
iEngage/python-sdk | iengage_client/models/tag.py | 1 | 3896 | # coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Tag(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, tag_id=None, tag_name=None, count=None):
"""
Tag - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'tag_id': 'int',
'tag_name': 'str',
'count': 'int'
}
self.attribute_map = {
'tag_id': 'tagId',
'tag_name': 'tagName',
'count': 'count'
}
self._tag_id = tag_id
self._tag_name = tag_name
self._count = count
@property
def tag_id(self):
"""
Gets the tag_id of this Tag.
:return: The tag_id of this Tag.
:rtype: int
"""
return self._tag_id
@tag_id.setter
def tag_id(self, tag_id):
"""
Sets the tag_id of this Tag.
:param tag_id: The tag_id of this Tag.
:type: int
"""
self._tag_id = tag_id
@property
def tag_name(self):
"""
Gets the tag_name of this Tag.
:return: The tag_name of this Tag.
:rtype: str
"""
return self._tag_name
@tag_name.setter
def tag_name(self, tag_name):
"""
Sets the tag_name of this Tag.
:param tag_name: The tag_name of this Tag.
:type: str
"""
self._tag_name = tag_name
@property
def count(self):
"""
Gets the count of this Tag.
:return: The count of this Tag.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""
Sets the count of this Tag.
:param count: The count of this Tag.
:type: int
"""
self._count = count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
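# Illustrative usage (editor's sketch, not part of the generated client; the
# attribute values below are arbitrary examples):
#
#   tag = Tag(tag_id=1, tag_name='python', count=42)
#   tag.tag_name    # -> 'python'
#   tag.to_dict()   # -> {'tag_id': 1, 'tag_name': 'python', 'count': 42}
#   print(tag)      # pretty-printed dict via __repr__ / to_str()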
| apache-2.0 | -7,756,863,969,602,901,000 | 23.35 | 186 | 0.500257 | false |
scieloorg/opac | opac/tests/test_main_views_abstract.py | 1 | 4558 | # coding: utf-8
# import unittest
# from unittest.mock import patch, Mock
from unittest.mock import patch
import flask
from flask import url_for, g, current_app
# from flask import render_template
# from flask_babelex import gettext as _
from .base import BaseTestCase
from . import utils
class TestArticleDetailV3Abstract(BaseTestCase):
def _get_response(self, article_data=None, part='abstract', pid_v2=None,
abstract_lang=None):
with current_app.test_request_context():
utils.makeOneCollection()
self.journal = utils.makeOneJournal()
issue = utils.makeOneIssue({'journal': self.journal})
_article_data = {
'title': 'Article Y',
'original_language': 'en',
'languages': ['es', 'pt', 'en'],
'issue': issue,
'journal': self.journal,
'abstracts': [
{"language": "en", "text": "Abstract in English"},
],
'url_segment': '10-11',
'translated_titles': [
{'language': 'es', 'name': u'Artículo en español'},
{'language': 'pt', 'name': u'Artigo en Português'},
],
'pid': 'PIDV2'
}
_article_data.update(article_data or {})
self.article = utils.makeOneArticle(_article_data)
if pid_v2:
url = '%s?script=sci_abstract&pid=%s' % (
url_for('main.router_legacy'), pid_v2)
else:
url = url_for(
'main.article_detail_v3',
url_seg=self.journal.url_segment,
article_pid_v3=self.article.aid,
part=part,
lang=abstract_lang,
)
response = self.client.get(url)
return response
def test_abstract_pid_v3_returns_404_and_displays_invalid_part_value_message(self):
expected = "Não existe 'abst'. No seu lugar use 'abstract'"
response = self._get_response(part='abst')
self.assertStatus(response, 404)
result = response.data.decode('utf-8')
self.assertIn(expected, result)
def test_abstract_pid_v3_returns_status_code_200(self):
response = self._get_response(abstract_lang="en")
self.assertStatus(response, 200)
@patch("webapp.main.views.render_html")
def test_abstract_pid_v3(self, mock_render_html):
mock_render_html.return_value = (
"abstract do documento",
['en', 'es', 'pt'],
)
expected = "abstract do documento"
response = self._get_response(abstract_lang="en")
result = response.data.decode('utf-8')
mock_render_html.assert_called_once_with(self.article, "en", True)
self.assertIn(expected, result)
@patch("webapp.main.views.render_html")
def test_abstract_pid_v3_does_not_return_abstract_but_fulltext_if_part_is_None(self, mock_render_html):
mock_render_html.return_value = (
"texto do documento",
['en', 'es', 'pt'],
)
expected = "texto do documento"
response = self._get_response(part=None)
result = response.data.decode('utf-8')
mock_render_html.assert_called_once_with(self.article, "en", False)
self.assertIn(expected, result)
def test_abstract_pid_v3_returns_404_because_lang_is_missing(self):
expected = "Não existe 'False'. No seu lugar use 'abstract'"
response = self._get_response(part=False)
self.assertStatus(response, 404)
result = response.data.decode('utf-8')
self.assertIn(expected, result)
def test_abstract_pid_v3_returns_404_and_displays_invalid_part_value_message_if_part_is_False(self):
expected = "Não existe 'False'. No seu lugar use 'abstract'"
response = self._get_response(part=False)
self.assertStatus(response, 404)
result = response.data.decode('utf-8')
self.assertIn(expected, result)
@patch("webapp.main.views.render_html")
def test_router_legacy_calls(self, mock_f):
response = self._get_response(pid_v2='pidv2')
self.assertRedirects(
response,
url_for(
'main.article_detail_v3',
url_seg=self.journal.url_segment,
article_pid_v3=self.article.aid,
part='abstract',
lang="en",
),
)
| bsd-2-clause | 7,343,148,106,735,971,000 | 37.905983 | 107 | 0.564807 | false |
silverfix/django-concierge | concierge/urls.py | 1 | 1808 | # -*- coding: utf-8 -
from django.conf.urls import url
from django.contrib.auth import views as django_auth_views
from . import forms
from . import views
urlpatterns = [
url(r'^signup/$', views.SignupView.as_view(template_name='concierge/signup.html'), name='signup'),
url(r'^login/$', django_auth_views.login,
{'authentication_form': forms.LoginForm, 'template_name': 'concierge/login.html'}, name='login'),
url(r'^logout/$', views.LogoutView.as_view(template_name='concierge/logout.html'), name='logout'),
url(r'^password_change/$', django_auth_views.password_change, name='password_change'),
url(r'^password_change/done/$', django_auth_views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', django_auth_views.password_reset,
kwargs={
'template_name': 'concierge/password_reset_form.html',
'post_reset_redirect': 'concierge:password_reset_done',
'email_template_name': 'concierge/password_reset_email.html'
}, name='password_reset'),
url(r'^password_reset/done/$', django_auth_views.password_reset_done,
kwargs={
'template_name': 'concierge/password_reset_done.html',
},
name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
django_auth_views.password_reset_confirm,
kwargs={
'template_name': 'concierge/password_reset_confirm.html',
'post_reset_redirect': 'concierge:password_reset_complete'
}, name='password_reset_confirm'),
url(r'^reset/done/$', django_auth_views.password_reset_complete,
kwargs={
'template_name': 'concierge/password_reset_complete.html'
}, name='password_reset_complete'),
]
| bsd-3-clause | -6,275,472,563,960,320,000 | 46.578947 | 105 | 0.644358 | false |
markflyhigh/incubator-beam | sdks/python/apache_beam/io/source_test_utils.py | 1 | 26317 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions and test harnesses for source implementations.
This module contains helper functions and test harnesses for checking
correctness of source (a subclass of ``iobase.BoundedSource``) and range
tracker (a subclass of``iobase.RangeTracker``) implementations.
Contains a few lightweight utilities (e.g. reading items from a source such as
``readFromSource()``), as well as heavyweight property testing and stress
testing harnesses that help achieve a large amount of test coverage with little
code.
Most notable ones are:
* ``assertSourcesEqualReferenceSource()`` helps testing that the data read by
the union of sources produced by ``BoundedSource.split()`` is the same as data
read by the original source.
* If your source implements dynamic work rebalancing, use the
``assertSplitAtFraction()`` family of functions - they test behavior of
``RangeTracker.try_split()``, in particular, that various consistency
properties are respected and the total set of data read by the source is
preserved when splits happen. Use ``assertSplitAtFractionBehavior()`` to test
individual cases of ``RangeTracker.try_split()`` and use
``assertSplitAtFractionExhaustive()`` as a heavy-weight stress test including
concurrency. We strongly recommend to use both.
For example usages, see the unit tests of modules such as
* apache_beam.io.source_test_utils_test.py
* apache_beam.io.avroio_test.py
"""
from __future__ import absolute_import
from __future__ import division
import logging
import threading
import weakref
from builtins import next
from builtins import object
from builtins import range
from collections import namedtuple
from multiprocessing.pool import ThreadPool
from apache_beam.io import iobase
from apache_beam.testing.util import equal_to
__all__ = ['read_from_source',
'assert_sources_equal_reference_source',
'assert_reentrant_reads_succeed',
'assert_split_at_fraction_behavior',
'assert_split_at_fraction_binary',
'assert_split_at_fraction_exhaustive',
'assert_split_at_fraction_fails',
'assert_split_at_fraction_succeeds_and_consistent']
class ExpectedSplitOutcome(object):
MUST_SUCCEED_AND_BE_CONSISTENT = 1
MUST_FAIL = 2
MUST_BE_CONSISTENT_IF_SUCCEEDS = 3
SplitAtFractionResult = namedtuple(
'SplitAtFractionResult', 'num_primary_items num_residual_items')
SplitFractionStatistics = namedtuple(
'SplitFractionStatistics',
'successful_fractions non_trivial_fractions')
def read_from_source(source, start_position=None, stop_position=None):
"""Reads elements from the given ```BoundedSource```.
Only reads elements within the given position range.
Args:
source (~apache_beam.io.iobase.BoundedSource):
:class:`~apache_beam.io.iobase.BoundedSource` implementation.
start_position (int): start position for reading.
stop_position (int): stop position for reading.
Returns:
List[str]: the set of values read from the sources.
"""
values = []
range_tracker = source.get_range_tracker(start_position, stop_position)
assert isinstance(range_tracker, iobase.RangeTracker)
reader = source.read(range_tracker)
for value in reader:
values.append(value)
return values
def _ThreadPool(threads):
# ThreadPool crashes in old versions of Python (< 2.7.5) if created from a
# child thread. (http://bugs.python.org/issue10015)
if not hasattr(threading.current_thread(), '_children'):
threading.current_thread()._children = weakref.WeakKeyDictionary()
return ThreadPool(threads)
def assert_sources_equal_reference_source(reference_source_info, sources_info):
"""Tests if a reference source is equal to a given set of sources.
Given a reference source (a :class:`~apache_beam.io.iobase.BoundedSource`
and a position range) and a list of sources, assert that the union of the
records read from the list of sources is equal to the records read from the
reference source.
Args:
reference_source_info\
(Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
a three-tuple that gives the reference
:class:`~apache_beam.io.iobase.BoundedSource`, position to start
reading at, and position to stop reading at.
sources_info\
(Iterable[Tuple[~apache_beam.io.iobase.BoundedSource, int, int]]):
a set of sources. Each source is a three-tuple that is of the same
format described above.
Raises:
~exceptions.ValueError: if the set of data produced by the reference source
and the given set of sources are not equivalent.
"""
if not (isinstance(reference_source_info, tuple) and
len(reference_source_info) == 3 and
isinstance(reference_source_info[0], iobase.BoundedSource)):
    raise ValueError('reference_source_info must be a three-tuple where the '
                     'first item of the tuple gives an '
'iobase.BoundedSource. Received: %r'
% reference_source_info)
reference_records = read_from_source(
*reference_source_info)
source_records = []
for source_info in sources_info:
assert isinstance(source_info, tuple)
assert len(source_info) == 3
if not (isinstance(source_info, tuple) and
len(source_info) == 3 and
isinstance(source_info[0], iobase.BoundedSource)):
      raise ValueError('source_info must be a three-tuple where the '
                       'first item of the tuple gives an '
'iobase.BoundedSource. Received: %r'
% source_info)
if (type(reference_source_info[0].default_output_coder()) !=
type(source_info[0].default_output_coder())):
raise ValueError(
'Reference source %r and the source %r must use the same coder. '
'They are using %r and %r respectively instead.'
% (reference_source_info[0], source_info[0],
type(reference_source_info[0].default_output_coder()),
type(source_info[0].default_output_coder())))
source_records.extend(read_from_source(*source_info))
if len(reference_records) != len(source_records):
raise ValueError(
'Reference source must produce the same number of records as the '
'list of sources. Number of records were %d and %d instead.'
% (len(reference_records), len(source_records)))
if equal_to(reference_records)(source_records):
raise ValueError(
'Reference source and provided list of sources must produce the '
'same set of records.')
def assert_reentrant_reads_succeed(source_info):
"""Tests if a given source can be read in a reentrant manner.
Assume that given source produces the set of values ``{v1, v2, v3, ... vn}``.
For ``i`` in range ``[1, n-1]`` this method performs a reentrant read after
reading ``i`` elements and verifies that both the original and reentrant read
produce the expected set of values.
Args:
source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
a three-tuple that gives the reference
:class:`~apache_beam.io.iobase.BoundedSource`, position to start reading
at, and a position to stop reading at.
Raises:
~exceptions.ValueError: if source is too trivial or reentrant read result
in an incorrect read.
"""
source, start_position, stop_position = source_info
assert isinstance(source, iobase.BoundedSource)
expected_values = [val for val in source.read(source.get_range_tracker(
start_position, stop_position))]
if len(expected_values) < 2:
raise ValueError('Source is too trivial since it produces only %d '
'values. Please give a source that reads at least 2 '
'values.' % len(expected_values))
for i in range(1, len(expected_values) - 1):
read_iter = source.read(source.get_range_tracker(
start_position, stop_position))
original_read = []
for _ in range(i):
original_read.append(next(read_iter))
# Reentrant read
reentrant_read = [val for val in source.read(
source.get_range_tracker(start_position, stop_position))]
# Continuing original read.
for val in read_iter:
original_read.append(val)
if equal_to(original_read)(expected_values):
raise ValueError('Source did not produce expected values when '
'performing a reentrant read after reading %d values. '
'Expected %r received %r.'
% (i, expected_values, original_read))
if equal_to(reentrant_read)(expected_values):
raise ValueError('A reentrant read of source after reading %d values '
'did not produce expected values. Expected %r '
'received %r.'
% (i, expected_values, reentrant_read))
def assert_split_at_fraction_behavior(source, num_items_to_read_before_split,
split_fraction, expected_outcome):
"""Verifies the behaviour of splitting a source at a given fraction.
Asserts that splitting a :class:`~apache_beam.io.iobase.BoundedSource` either
fails after reading **num_items_to_read_before_split** items, or succeeds in
a way that is consistent according to
:func:`assert_split_at_fraction_succeeds_and_consistent()`.
Args:
source (~apache_beam.io.iobase.BoundedSource): the source to perform
dynamic splitting on.
num_items_to_read_before_split (int): number of items to read before
splitting.
split_fraction (float): fraction to split at.
expected_outcome (int): a value from
:class:`~apache_beam.io.source_test_utils.ExpectedSplitOutcome`.
Returns:
Tuple[int, int]: a tuple that gives the number of items produced by reading
the two ranges produced after dynamic splitting. If splitting did not
occur, the first value of the tuple will represent the full set of records
read by the source while the second value of the tuple will be ``-1``.
"""
assert isinstance(source, iobase.BoundedSource)
expected_items = read_from_source(source, None, None)
return _assert_split_at_fraction_behavior(
source, expected_items, num_items_to_read_before_split, split_fraction,
expected_outcome)
def _assert_split_at_fraction_behavior(
source, expected_items, num_items_to_read_before_split,
split_fraction, expected_outcome, start_position=None, stop_position=None):
range_tracker = source.get_range_tracker(start_position, stop_position)
assert isinstance(range_tracker, iobase.RangeTracker)
current_items = []
reader = source.read(range_tracker)
# Reading 'num_items_to_read_before_split' items.
reader_iter = iter(reader)
for _ in range(num_items_to_read_before_split):
current_items.append(next(reader_iter))
suggested_split_position = range_tracker.position_at_fraction(
split_fraction)
stop_position_before_split = range_tracker.stop_position()
split_result = range_tracker.try_split(suggested_split_position)
if split_result is not None:
if len(split_result) != 2:
raise ValueError('Split result must be a tuple that contains split '
'position and split fraction. Received: %r' %
(split_result,))
if range_tracker.stop_position() != split_result[0]:
raise ValueError('After a successful split, the stop position of the '
'RangeTracker must be the same as the returned split '
'position. Observed %r and %r which are different.'
                       % (range_tracker.stop_position(), split_result[0]))
if split_fraction < 0 or split_fraction > 1:
raise ValueError('Split fraction must be within the range [0,1]',
'Observed split fraction was %r.' % (split_result[1],))
stop_position_after_split = range_tracker.stop_position()
if split_result and stop_position_after_split == stop_position_before_split:
raise ValueError('Stop position %r did not change after a successful '
'split of source %r at fraction %r.' %
(stop_position_before_split, source, split_fraction))
if expected_outcome == ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT:
if not split_result:
raise ValueError('Expected split of source %r at fraction %r to be '
'successful after reading %d elements. But '
'the split failed.' %
(source, split_fraction, num_items_to_read_before_split))
elif expected_outcome == ExpectedSplitOutcome.MUST_FAIL:
if split_result:
raise ValueError('Expected split of source %r at fraction %r after '
'reading %d elements to fail. But splitting '
'succeeded with result %r.' %
(source, split_fraction, num_items_to_read_before_split,
split_result))
elif (expected_outcome !=
ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS):
raise ValueError('Unknown type of expected outcome: %r' %
expected_outcome)
current_items.extend([value for value in reader_iter])
residual_range = (
split_result[0], stop_position_before_split) if split_result else None
return _verify_single_split_fraction_result(
source, expected_items, current_items,
split_result,
(range_tracker.start_position(), range_tracker.stop_position()),
residual_range, split_fraction)
def _range_to_str(start, stop):
return '[' + (str(start) + ',' + str(stop) + ')')
def _verify_single_split_fraction_result(
source, expected_items, current_items, split_successful, primary_range,
residual_range, split_fraction):
assert primary_range
primary_items = read_from_source(source, *primary_range)
if not split_successful:
# For unsuccessful splits, residual_range should be None.
assert not residual_range
residual_items = (
read_from_source(source, *residual_range)
if split_successful else [])
total_items = primary_items + residual_items
if current_items != primary_items:
raise ValueError('Current source %r and a source created using the '
'range of the primary source %r determined '
'by performing dynamic work rebalancing at fraction '
'%r produced different values. Expected '
'these sources to produce the same list of values.'
% (source,
_range_to_str(*primary_range),
split_fraction)
)
if expected_items != total_items:
raise ValueError('Items obtained by reading the source %r for primary '
'and residual ranges %s and %s did not produce the '
'expected list of values.'
% (source,
_range_to_str(*primary_range),
_range_to_str(*residual_range)))
result = (len(primary_items),
len(residual_items) if split_successful else -1)
return result
def assert_split_at_fraction_succeeds_and_consistent(
source, num_items_to_read_before_split, split_fraction):
"""Verifies some consistency properties of dynamic work rebalancing.
Equivalent to the following pseudocode:::
original_range_tracker = source.getRangeTracker(None, None)
original_reader = source.read(original_range_tracker)
items_before_split = read N items from original_reader
suggested_split_position = original_range_tracker.position_for_fraction(
split_fraction)
    original_stop_position = original_range_tracker.stop_position()
split_result = range_tracker.try_split()
split_position, split_fraction = split_result
primary_range_tracker = source.get_range_tracker(
original_range_tracker.start_position(), split_position)
residual_range_tracker = source.get_range_tracker(split_position,
original_stop_position)
assert that: items when reading source.read(primary_range_tracker) ==
items_before_split + items from continuing to read 'original_reader'
assert that: items when reading source.read(original_range_tracker) =
items when reading source.read(primary_range_tracker) + items when reading
source.read(residual_range_tracker)
Args:
source: source to perform dynamic work rebalancing on.
num_items_to_read_before_split: number of items to read before splitting.
split_fraction: fraction to split at.
"""
assert_split_at_fraction_behavior(
source, num_items_to_read_before_split, split_fraction,
ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
def assert_split_at_fraction_fails(source, num_items_to_read_before_split,
split_fraction):
"""Asserts that dynamic work rebalancing at a given fraction fails.
Asserts that trying to perform dynamic splitting after reading
'num_items_to_read_before_split' items from the source fails.
Args:
source: source to perform dynamic splitting on.
num_items_to_read_before_split: number of items to read before splitting.
split_fraction: fraction to split at.
"""
assert_split_at_fraction_behavior(
source, num_items_to_read_before_split, split_fraction,
ExpectedSplitOutcome.MUST_FAIL)
def assert_split_at_fraction_binary(
source, expected_items, num_items_to_read_before_split, left_fraction,
left_result, right_fraction, right_result, stats, start_position=None,
stop_position=None):
"""Performs dynamic work rebalancing for fractions within a given range.
Asserts that given a start position, a source can be split at every
interesting fraction (halfway between two fractions that differ by at
least one item) and the results are consistent if a split succeeds.
Args:
source: source to perform dynamic splitting on.
expected_items: total set of items expected when reading the source.
num_items_to_read_before_split: number of items to read before splitting.
left_fraction: left fraction for binary splitting.
left_result: result received by splitting at left fraction.
right_fraction: right fraction for binary splitting.
right_result: result received by splitting at right fraction.
stats: a ``SplitFractionStatistics`` for storing results.
"""
assert right_fraction > left_fraction
if right_fraction - left_fraction < 0.001:
# This prevents infinite recursion.
return
middle_fraction = (left_fraction + right_fraction) / 2
if left_result is None:
left_result = _assert_split_at_fraction_behavior(
source, expected_items, num_items_to_read_before_split, left_fraction,
ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
if right_result is None:
right_result = _assert_split_at_fraction_behavior(
source, expected_items, num_items_to_read_before_split,
right_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
middle_result = _assert_split_at_fraction_behavior(
source, expected_items, num_items_to_read_before_split, middle_fraction,
ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
if middle_result[1] != -1:
stats.successful_fractions.append(middle_fraction)
if middle_result[1] > 0:
stats.non_trivial_fractions.append(middle_fraction)
# Two split results are equivalent if primary and residual ranges of them
# produce the same number of records (simply checking the size of primary
# enough since the total number of records is constant).
if left_result[0] != middle_result[0]:
assert_split_at_fraction_binary(
source, expected_items, num_items_to_read_before_split, left_fraction,
left_result, middle_fraction, middle_result, stats)
# We special case right_fraction=1.0 since that could fail due to being out
# of range. (even if a dynamic split fails at 'middle_fraction' and at
# fraction 1.0, there might be fractions in range ('middle_fraction', 1.0)
# where dynamic splitting succeeds).
if right_fraction == 1.0 or middle_result[0] != right_result[0]:
assert_split_at_fraction_binary(
source, expected_items, num_items_to_read_before_split,
middle_fraction, middle_result, right_fraction, right_result, stats)
MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM = 100
MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL = 1000
def assert_split_at_fraction_exhaustive(
source, start_position=None, stop_position=None,
perform_multi_threaded_test=True):
"""Performs and tests dynamic work rebalancing exhaustively.
Asserts that for each possible start position, a source can be split at
every interesting fraction (halfway between two fractions that differ by at
least one item) and the results are consistent if a split succeeds.
Verifies multi threaded splitting as well.
Args:
source (~apache_beam.io.iobase.BoundedSource): the source to perform
dynamic splitting on.
perform_multi_threaded_test (bool): if :data:`True` performs a
multi-threaded test, otherwise this test is skipped.
Raises:
~exceptions.ValueError: if the exhaustive splitting test fails.
"""
expected_items = read_from_source(source, start_position, stop_position)
if not expected_items:
raise ValueError('Source %r is empty.' % source)
if len(expected_items) == 1:
raise ValueError('Source %r only reads a single item.' % source)
all_non_trivial_fractions = []
any_successful_fractions = False
any_non_trivial_fractions = False
for i in range(len(expected_items)):
stats = SplitFractionStatistics([], [])
assert_split_at_fraction_binary(
source, expected_items, i, 0.0, None, 1.0, None, stats)
if stats.successful_fractions:
any_successful_fractions = True
if stats.non_trivial_fractions:
any_non_trivial_fractions = True
all_non_trivial_fractions.append(stats.non_trivial_fractions)
if not any_successful_fractions:
raise ValueError('SplitAtFraction test completed vacuously: no '
'successful split fractions found')
if not any_non_trivial_fractions:
raise ValueError(
'SplitAtFraction test completed vacuously: no non-trivial split '
'fractions found')
if not perform_multi_threaded_test:
return
num_total_trials = 0
for i in range(len(expected_items)):
non_trivial_fractions = [2.0] # 2.0 is larger than any valid fraction.
non_trivial_fractions.extend(all_non_trivial_fractions[i])
min_non_trivial_fraction = min(non_trivial_fractions)
if min_non_trivial_fraction == 2.0:
# This will not happen all the time. Otherwise previous test will fail
# due to vacuousness.
continue
num_trials = 0
have_success = False
have_failure = False
thread_pool = _ThreadPool(2)
try:
while True:
num_trials += 1
if (num_trials >
MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM):
logging.warn(
'After %d concurrent splitting trials at item #%d, observed '
'only %s, giving up on this item',
num_trials,
i,
'success' if have_success else 'failure'
)
break
if _assert_split_at_fraction_concurrent(
source, expected_items, i, min_non_trivial_fraction, thread_pool):
have_success = True
else:
have_failure = True
if have_success and have_failure:
logging.info('%d trials to observe both success and failure of '
'concurrent splitting at item #%d', num_trials, i)
break
finally:
thread_pool.close()
num_total_trials += num_trials
if num_total_trials > MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL:
logging.warn('After %d total concurrent splitting trials, considered '
'only %d items, giving up.', num_total_trials, i)
break
logging.info('%d total concurrent splitting trials for %d items',
num_total_trials, len(expected_items))
def _assert_split_at_fraction_concurrent(
source, expected_items, num_items_to_read_before_splitting,
split_fraction, thread_pool=None):
range_tracker = source.get_range_tracker(None, None)
stop_position_before_split = range_tracker.stop_position()
reader = source.read(range_tracker)
reader_iter = iter(reader)
current_items = []
for _ in range(num_items_to_read_before_splitting):
current_items.append(next(reader_iter))
def read_or_split(test_params):
if test_params[0]:
return [val for val in test_params[1]]
else:
position = test_params[1].position_at_fraction(test_params[2])
result = test_params[1].try_split(position)
return result
inputs = []
pool = thread_pool if thread_pool else _ThreadPool(2)
try:
inputs.append([True, reader_iter])
inputs.append([False, range_tracker, split_fraction])
results = pool.map(read_or_split, inputs)
finally:
if not thread_pool:
pool.close()
current_items.extend(results[0])
primary_range = (
range_tracker.start_position(), range_tracker.stop_position())
split_result = results[1]
residual_range = (
split_result[0], stop_position_before_split) if split_result else None
res = _verify_single_split_fraction_result(
source, expected_items, current_items, split_result,
primary_range, residual_range, split_fraction)
return res[1] > 0
| apache-2.0 | 3,306,729,936,039,604,700 | 38.396707 | 80 | 0.68621 | false |
Letractively/aha-gae | aha/modelcontroller/tests/test_crudhander.py | 1 | 1715 | # -*- coding: utf-8 -*-
from unittest import TestCase
import logging
log = logging.getLogger(__name__)
from nose.tools import *
from aha.modelcontroller.formcontrol import FormControl, handle_state, validate
FC = FormControl
class TestCRUDControllerMixIn(TestCase):
def test_subclass(self):
"""
Test for subclassing CRUDControllerMixIn
"""
from coregae.controller.crudcontrollers import (
CRUDControllerMixIn, CRUDControllerMetaClass)
class TestKlass(CRUDControllerMixIn):
EDIT_FC = FormControl()
ADD_FC = FormControl()
@EDIT_FC.handle_state(FC.SUCCESS)
def edit_data(self):
return "FOO"
@EDIT_FC.handle_validate(FC.INITIAL)
def edit_validate(self):
pass
@ADD_FC.handle_state(FC.INITIAL)
def add_form(self):
return "FOO"
assert_true(hasattr(TestKlass, 'EDIT_FC'))
assert_true(hasattr(TestKlass, 'ADD_FC'))
assert_not_equal(TestKlass.EDIT_FC, CRUDControllerMixIn.EDIT_FC)
efc = TestKlass.EDIT_FC
efc2 = CRUDControllerMixIn.EDIT_FC
assert_equal(efc.get_processor(FC.INITIAL),
efc2.get_processor(FC.INITIAL))
assert_not_equal(efc.get_processor(FC.SUCCESS),
efc2.get_processor(FC.SUCCESS))
assert_not_equal(efc.get_validator(FC.INITIAL),
efc2.get_validator(FC.INITIAL))
afc = TestKlass.ADD_FC
afc2 = CRUDControllerMixIn.ADD_FC
assert_not_equal(afc.get_processor(FC.INITIAL),
afc2.get_processor(FC.INITIAL))
| bsd-3-clause | -1,365,521,850,367,006,700 | 27.114754 | 79 | 0.596501 | false |
EmreAtes/spack | var/spack/repos/builtin/packages/texlive/package.py | 1 | 3446 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Texlive(Package):
"""TeX Live is a free software distribution for the TeX typesetting
    system. Heads up, it is not a reproducible installation."""
homepage = "http://www.tug.org/texlive"
# Install from specific site because the texlive mirrors do not
# all update in synchrony.
#
# BEWARE: TexLive updates their installs frequently (probably why
# they call it *Live*...). There is no good way to provide a
# repeatable install of the package.
#
# We're now pulling the installation bits from tug.org's repo of
# historic bits. This means that the checksum for the installer
# itself is stable. Don't let that fool you though, it's still
# installing TeX **LIVE** from e.g. ctan.math.... below, which is
# not reproducible.
version('live', '8f8fc301514c08a89a2e97197369c648',
url='ftp://tug.org/historic/systems/texlive/2017/install-tl-unx.tar.gz')
# There does not seem to be a complete list of schemes.
# Examples include:
# full scheme (everything)
# medium scheme (small + more packages and languages)
# small scheme (basic + xetex, metapost, a few languages)
# basic scheme (plain and latex)
# minimal scheme (plain only)
# See:
# https://www.tug.org/texlive/doc/texlive-en/texlive-en.html#x1-25025r6
variant(
'scheme',
default='small',
values=('minimal', 'basic', 'small', 'medium', 'full'),
description='Package subset to install'
)
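    # Editor's note: with standard Spack variant syntax, a user would select a
    # subset with a spec such as ``texlive scheme=full`` (illustrative example).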
depends_on('perl', type='build')
def install(self, spec, prefix):
# Using texlive's mirror system leads to mysterious problems,
# in lieu of being able to specify a repository as a variant, hardwire
# a particular (slow, but central) one for now.
_repository = 'http://ctan.math.washington.edu/tex-archive/systems/texlive/tlnet/'
env = os.environ
env['TEXLIVE_INSTALL_PREFIX'] = prefix
perl = which('perl')
scheme = spec.variants['scheme'].value
perl('./install-tl', '-scheme', scheme,
'-repository', _repository,
'-portable', '-profile', '/dev/null')
| lgpl-2.1 | 428,417,736,615,517,630 | 42.620253 | 90 | 0.653511 | false |
junzis/py-adsb-decoder | pyModeS/extra/aero.py | 1 | 5201 | """
Functions for aeronautics in this module
- physical quantities always in SI units
- lat,lon,course and heading in degrees
International Standard Atmosphere
::
p,rho,T = atmos(H) # atmos as function of geopotential altitude H [m]
a = vsound(H) # speed of sound [m/s] as function of H[m]
    p = pressure(H) # calls atmos but returns only pressure [Pa]
T = temperature(H) # calculates temperature [K]
    rho = density(H) # calls atmos but returns only density [kg/m3]
Speed conversion at altitude H[m] in ISA
::
Mach = tas2mach(Vtas,H) # true airspeed (Vtas) to mach number conversion
    Vtas = mach2tas(Mach,H) # mach number to true airspeed (Vtas) conversion
Vtas = eas2tas(Veas,H) # equivalent airspeed to true airspeed, H in [m]
    Veas = tas2eas(Vtas,H) # true airspeed to equivalent airspeed, H in [m]
Vtas = cas2tas(Vcas,H) # Vcas to Vtas conversion both m/s, H in [m]
Vcas = tas2cas(Vtas,H) # Vtas to Vcas conversion both m/s, H in [m]
Vcas = mach2cas(Mach,H) # Mach to Vcas conversion Vcas in m/s, H in [m]
    Mach = cas2mach(Vcas,H) # Vcas to mach conversion Vcas in m/s, H in [m]
"""
import numpy as np
"""Aero and geo Constants """
kts = 0.514444 # knot -> m/s
ft = 0.3048 # ft -> m
fpm = 0.00508 # ft/min -> m/s
inch = 0.0254 # inch -> m
sqft = 0.09290304 # 1 square foot
nm = 1852. # nautical mile -> m
lbs = 0.453592 # pound -> kg
g0 = 9.80665 # m/s2, Sea level gravity constant
R = 287.05287 # m2/(s2 x K), gas constant, sea level ISA
p0 = 101325. # Pa, air pressure, sea level ISA
rho0 = 1.225 # kg/m3, air density, sea level ISA
T0 = 288.15 # K, temperature, sea level ISA
gamma = 1.40 # cp/cv for air
gamma1 = 0.2 # (gamma-1)/2 for air
gamma2 = 3.5 # gamma/(gamma-1) for air
beta = -0.0065 # [K/m] ISA temp gradient below tropopause
r_earth = 6371000. # m, average earth radius
a0 = 340.293988 # m/s, sea level speed of sound ISA, sqrt(gamma*R*T0)
def atmos(H):
# H in metres
T = np.maximum(288.15 - 0.0065 * H, 216.65)
rhotrop = 1.225 * (T / 288.15)**4.256848030018761
dhstrat = np.maximum(0., H - 11000.0)
rho = rhotrop * np.exp(-dhstrat / 6341.552161)
p = rho * R * T
return p, rho, T
def temperature(H):
p, r, T = atmos(H)
return T
def pressure(H):
p, r, T = atmos(H)
return p
def density(H):
p, r, T = atmos(H)
return r
def vsound(H):
"""Speed of sound"""
T = temperature(H)
a = np.sqrt(gamma * R * T)
return a
def distance(lat1, lon1, lat2, lon2, H=0):
"""
Compute spherical distance from spherical coordinates.
For two locations in spherical coordinates
(1, theta, phi) and (1, theta', phi')
cosine( arc length ) =
sin phi sin phi' cos(theta-theta') + cos phi cos phi'
distance = rho * arc length
"""
# phi = 90 - latitude
phi1 = np.radians(90.0 - lat1)
phi2 = np.radians(90.0 - lat2)
# theta = longitude
theta1 = np.radians(lon1)
theta2 = np.radians(lon2)
cos = np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2)
cos = np.where(cos>1, 1, cos)
arc = np.arccos(cos)
dist = arc * (r_earth + H) # meters, radius of earth
return dist
def bearing(lat1, lon1, lat2, lon2):
lat1 = np.radians(lat1)
lon1 = np.radians(lon1)
lat2 = np.radians(lat2)
lon2 = np.radians(lon2)
x = np.sin(lon2-lon1) * np.cos(lat2)
y = np.cos(lat1) * np.sin(lat2) \
- np.sin(lat1) * np.cos(lat2) * np.cos(lon2-lon1)
initial_bearing = np.arctan2(x, y)
initial_bearing = np.degrees(initial_bearing)
bearing = (initial_bearing + 360) % 360
return bearing
# -----------------------------------------------------
# Speed conversions, altitude H all in meters
# -----------------------------------------------------
def tas2mach(Vtas, H):
"""True Airspeed to Mach number"""
a = vsound(H)
Mach = Vtas/a
return Mach
def mach2tas(Mach, H):
"""Mach number to True Airspeed"""
a = vsound(H)
Vtas = Mach*a
return Vtas
def eas2tas(Veas, H):
"""Equivalent Airspeed to True Airspeed"""
rho = density(H)
Vtas = Veas * np.sqrt(rho0/rho)
return Vtas
def tas2eas(Vtas, H):
"""True Airspeed to Equivalent Airspeed"""
rho = density(H)
Veas = Vtas * np.sqrt(rho/rho0)
return Veas
def cas2tas(Vcas, H):
"""Calibrated Airspeed to True Airspeed"""
p, rho, T = atmos(H)
qdyn = p0*((1.+rho0*Vcas*Vcas/(7.*p0))**3.5-1.)
Vtas = np.sqrt(7.*p/rho*((1.+qdyn/p)**(2./7.)-1.))
return Vtas
def tas2cas(Vtas, H):
"""True Airspeed to Calibrated Airspeed"""
p, rho, T = atmos(H)
qdyn = p*((1.+rho*Vtas*Vtas/(7.*p))**3.5-1.)
Vcas = np.sqrt(7.*p0/rho0*((qdyn/p0+1.)**(2./7.)-1.))
return Vcas
def mach2cas(Mach, H):
"""Mach number to Calibrated Airspeed"""
Vtas = mach2tas(Mach, H)
Vcas = tas2cas(Vtas, H)
return Vcas
def cas2mach(Vcas, H):
"""Calibrated Airspeed to Mach number"""
Vtas = cas2tas(Vcas, H)
Mach = tas2mach(Vtas, H)
return Mach
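# Illustrative usage (editor's sketch; the numeric values are arbitrary examples):
#
#   h = 10000 * ft                       # 10,000 ft expressed in metres
#   p = pressure(h)                      # static pressure at that altitude [Pa]
#   vtas = cas2tas(250 * kts, h)         # calibrated -> true airspeed [m/s]
#   m = cas2mach(250 * kts, h)           # corresponding Mach number
#   d = distance(52.3, 4.8, 50.9, 4.4)   # great-circle distance in metres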
| mit | 6,628,668,726,650,856,000 | 27.266304 | 93 | 0.584503 | false |
saltastro/saltefficiency | dataquality/upload_throughput.py | 1 | 4898 | import os
import argparse
import glob
import traceback
import mysql
import dataquality as dq
def upload_throughput(sdb, infile, force=False):
"""Upload throughput measurements to the Science Database
Parameters
----------
sdb: ~mysql.sdb
Connection to the Science Database
infile: str
Path to file to upload to the database
force: bool
If True, it will update the database even if an entry
already exists
"""
# parse the name of the file
tab_name, obsdate = dq.parse_filename(infile)
# check if it is already in the table
sel_cmd = "{}_Id, Throughput_Id".format(tab_name)
tab_cmd = "{} join Throughput using (Throughput_Id) join NightInfo using (NightInfo_id) ".format(tab_name)
log_cmd = " Date = '{}-{}-{}'".format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
record = sdb.select(sel_cmd, tab_cmd, log_cmd)
if len(record) > 0 and not force: return
if os.path.basename(infile).startswith('Rss'):
instr='Rss'
elif os.path.basename(infile).startswith('Salticam'):
instr='Salticam'
else:
raise ValueError("File name not recognized")
# parse the file and update or insert into the database
lines = open(infile).readlines()
if len(lines) < 3 :
raise ValueError("Insufficient number of lines in {}".format(infile))
stars = lines[0].strip()
comment = lines[1].strip().strip('\'')
nid = sdb.select('NightInfo_Id', 'NightInfo', log_cmd)[0][0]
#create throughput
try:
tid = sdb.select('Throughput_Id','Throughput', 'NightInfo_Id={}'.format(nid))[0][0]
except:
ins_cmd = "NightInfo_Id = {} , StarsUsed = '{}', Comments = '{}'".format(nid, stars, comment)
sdb.insert(ins_cmd, 'Throughput')
tid = sdb.select('Throughput_Id','Throughput', 'NightInfo_Id={}'.format(nid))[0][0]
if force:
upd_cmd = "StarsUsed = '{}', Comments = '{}'".format(stars, comment)
sdb.update(upd_cmd, 'Throughput', 'Throughput_Id={}'.format(tid))
# upload each of the filters
for l in lines[2:]:
if not l.strip(): return
l = l.split()
if instr == 'Rss':
l[0] = l[0].strip(',')
try:
fid = sdb.select('RssFilter_Id', 'RssFilter', 'Barcode="{}"'.format(l[0]))[0][0]
except IndexError:
raise ValueError('{} is not an RSS Filter'.format(l[0]))
ins_cmd = 'RssFilter_Id={}, RssThroughputMeasurement={}'.format(fid, l[1])
up_cmd = 'RssFilter_Id={} and Throughput_Id={}'.format(fid, tid)
elif instr == 'Salticam':
l[0] = l[0].strip(',')
try:
fid = sdb.select('SalticamFilter_Id', 'SalticamFilter', 'SalticamFilter_Name="{}"'.format(l[0]))[0][0]
except IndexError:
raise ValueError('{} is not an Salticam Filter'.format(l[0]))
ins_cmd = '{}Filter_Id={}, Throughput_Id={}, {}={}'.format(instr, fid, tid, tab_name, l[1])
if len(record)==0:
sdb.insert(ins_cmd, tab_name)
elif force:
up_cmd = '{}Filter_Id={} and Throughput_Id={}'.format(instr, fid, tid)
uid = sdb.select('{}_Id'.format(tab_name), tab_name, up_cmd)[0][0]
sdb.update(ins_cmd, tab_name, '{}_Id={}'.format(tab_name, uid))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Upload throughput measurents ot the SDB')
parser.add_argument('-dir', dest='throughput_dir', action='store',
default='/salt/logs/dataquality/throughput/',
help='Directory with throughput files')
parser.add_argument('-f', dest='force', action='store_const',
const=True, default=False,
help='Force the updates')
parser.add_argument('-e', dest='email', action='store_const',
const=True, default=False,
help='Email error results')
args = parser.parse_args()
user=os.environ['SDBUSER']
password=os.environ['SDBPASS']
sdb=mysql.mysql(sdbhost, sdbname, user, password, port=3306)
#get the file names
error_msg = ''
for infile in glob.glob(args.throughput_dir+'*.txt'):
try:
upload_throughput(sdb, infile, force=args.force)
except ValueError, e:
error_msg += infile + '\n' + traceback.format_exc() + str(e) + '\n\n'
except IOError, e:
error_msg += infile + '\n' + traceback.format_exc() + str(e) + '\n\n'
if error_msg: print(error_msg)
    if args.email and error_msg:
mailuser = os.environ['MAILUSER']
mailpass = os.environ['MAILPASS']
dq.send_email(error_msg, 'UPLOAD_TRHOUGHPUT Error', username=mailuser,
password=mailpass, to=os.environ['TPUTLIST'], sender = os.environ['MAILSENDER'])
| bsd-3-clause | 3,220,284,524,995,481,600 | 37.265625 | 117 | 0.57942 | false |
yashchandak/GNN | Sample_Run/Seq_Dynamic/blogDWdata.py | 1 | 6976 | from __future__ import generators, print_function
import numpy as np
from copy import deepcopy
from random import shuffle
from scipy.io import loadmat
class DataSet(object):
def __init__(self, cfg):
"""Construct a DataSet.
"""
self.cfg = cfg
self.all_walks, self.node_seq = self.get_walks(cfg.walks_dir)
#self.node_seq = self.all_walks[:, -1] # index by ending node
self.all_labels = self.get_labels(cfg.label_dir)
self.all_features= self.get_fetaures(cfg.features_dir)
#Increment the positions by 1 and mark the 0th one as False
self.train_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'train_ids.npy')))
self.val_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'val_ids.npy')))
self.test_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'test_ids.npy')))
# [!!!IMP!!]Assert no overlap between test/val/train nodes
self.label_cache, self.update_cache = {0:list(self.all_labels[0])}, {}
def get_walks(self, path):
walks = np.fliplr(np.loadtxt(path, dtype=np.int)) # reverse the sequence
seq = deepcopy(walks[:,-1])
#rotate around the sequences, such that ends are padded with zeros
for i in range(np.shape(walks)[0]):
non_zeros = np.sum(walks[i] > 0)
walks[i] = np.roll(walks[i], non_zeros)
return walks, seq
def get_fetaures(self, path):
# Serves 2 purpose:
# a) add feature for dummy node 0 a.k.a <EOS> and <unlabeled>
# b) increments index of all features by 1, thus aligning it with indices in walks
all_features = np.load(path)
all_features = all_features.astype(np.float32, copy=False) # Required conversion for Python3
all_features = np.concatenate(([np.zeros(all_features.shape[1])], all_features), 0)
return all_features
def get_labels(self, path):
# Labels start with node '0'; Walks_data with node '1'
# To get corresponding mapping, increment the label node number by 1
# add label for dummy node 0 a.k.a <EOS> and <unlabeled>
all_labels = np.load(path)
all_labels = np.concatenate(([np.zeros(all_labels.shape[1])], all_labels), 0)
return all_labels
def accumulate_label_cache(self, labels, nodes):
#Aggregates all the labels for the corresponding nodes
#and tracks the count of updates made
default = (self.all_labels[0], 0) #Initial estimate -> all_zeros
labels = labels[0]
if self.cfg.data_sets.binary_label_updates:
#Convert to binary and keep only the maximum value as 1
amax = np.argmax(labels, axis = 1)
labels = np.zeros(labels.shape)
for idx, pos in enumerate(amax):
labels[idx,pos] = 1
for idx, node in enumerate(nodes):
prv_label, prv_count = self.update_cache.get(node, default)
new_label = prv_label + labels[idx]
new_count = prv_count + 1
self.update_cache[node] = (new_label, new_count)
def update_label_cache(self):
#Average all the predictions made for the corresponding nodes and reset cache
for k, v in self.update_cache.items():
self.label_cache[k] = list(v[0]/v[1])
self.update_cache = {}
def get_nodes(self, dataset):
nodes = []
if dataset == 'train':
nodes = self.train_nodes
elif dataset == 'val':
nodes = self.val_nodes
elif dataset == 'test':
nodes = self.test_nodes
elif dataset == 'all':
# Get all the nodes except the 0th node
nodes = [True]*len(self.train_nodes)
nodes[0] = False
else:
raise ValueError
return nodes
def next_batch(self, dataset, batch_size, shuffle=True):
nodes = self.get_nodes(dataset)
label_len = np.shape(self.all_labels)[1]
max_len = self.all_walks.shape[1]
# Get position of all walks ending with desired set of nodes
pos = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
pos.extend(temp)
seq.extend([node]*len(temp))
pos = np.array(pos)
seq = np.array(seq)
if shuffle:
indices = np.random.permutation(len(pos))
pos = pos[indices]
seq = seq[indices]
if batch_size == -1:
batch_size = len(pos)
tot = len(pos)//batch_size
for i in range(0, len(pos), batch_size):
x = self.all_walks[pos[i: i + batch_size]]
temp = np.array(x)>0 #get locations of all zero inputs
lengths = max_len - np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.label_cache[0]) for item in row] for row in x]
y = [list(self.all_labels[item]) for item in seq[i: i+batch_size]]
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
#seq = self.node_seq[pos[i: i + batch_size]]
yield (x, x2, seq, y, tot, lengths)
def next_batch_same(self, dataset, node_count=1):
nodes = self.get_nodes(dataset)
pos = []
counts = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
counts.append(len(temp))
seq.append(node)
pos.extend(temp)
pos = np.array(pos)
start = 0
max_len = self.all_walks.shape[1]
# Get a batch of all walks for 'node_count' number of node
for idx in range(0, len(counts), node_count):
#print(idx)
stop = start + np.sum(counts[idx:idx+node_count]) #start + total number of walks to be consiudered this time
x = self.all_walks[pos[start:stop]] #get the walks corresponding to respective positions
temp = np.array(x)>0 #get locations of all zero inputs
lengths = max_len - np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.label_cache[0]) for item in row] for row in x]
y = [list(self.all_labels[item]) for item in x[-1,:]] #Not useful, only presetn for sake of placeholder
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
start = stop
yield (x, x2, seq[idx:idx+node_count], counts[idx:idx+node_count], y, lengths)
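# Illustrative usage (editor's sketch; ``cfg`` and the batch size are
# hypothetical):
#
#   data = DataSet(cfg)
#   for x, x2, seq, y, tot, lengths in data.next_batch('train', batch_size=64):
#       pass  # feed features x, cached label estimates x2 and targets y to the net
#   data.update_label_cache()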
| mit | -3,829,700,722,819,435,500 | 38.862857 | 120 | 0.578842 | false |
DOV-Vlaanderen/pydov | setup.py | 1 | 2043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('requirements_dev.txt') as f:
# ignore the general requirements
requirements_dev = f.read().splitlines()[1:]
with open('requirements_doc.txt') as f:
requirements_doc = f.read().splitlines()
with open('requirements_vectorfile.txt') as f:
requirements_vectorfile = f.read().splitlines()
setup(
name='pydov',
version='2.1.0',
description=("A Python package to download data from Databank Ondergrond "
"Vlaanderen (DOV)."),
long_description=readme,
long_description_content_type='text/markdown',
author="DOV-Vlaanderen",
author_email='[email protected]',
url='https://github.com/DOV-Vlaanderen/pydov',
packages=find_packages(
include=['pydov']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='pydov',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Natural Language :: Dutch',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
],
test_suite='tests',
tests_require=requirements_dev,
extras_require={
'docs': requirements_doc,
'devs': requirements_dev,
'vectorfile': requirements_vectorfile
}
)
| mit | 6,766,960,254,937,599,000 | 31.951613 | 78 | 0.619677 | false |
ales-erjavec/orange-canvas | orangecanvas/scheme/tests/__init__.py | 1 | 2700 | """
Scheme tests
"""
from AnyQt.QtCore import QObject, QEventLoop, QTimer, QCoreApplication, QEvent
from typing import List
class EventSpy(QObject):
"""
A testing utility class (similar to QSignalSpy) to record events
delivered to a QObject instance.
Note
----
Only event types can be recorded (as QEvent instances are deleted
on delivery).
Note
----
Can only be used with a QCoreApplication running.
Parameters
----------
object : QObject
An object whose events need to be recorded.
etype : Union[QEvent.Type, Sequence[QEvent.Type]
A event type (or types) that should be recorded
"""
def __init__(self, object: QObject, etype, **kwargs):
super().__init__(**kwargs)
if not isinstance(object, QObject):
raise TypeError
self.__object = object
try:
len(etype)
except TypeError:
etypes = {etype}
else:
etypes = set(etype)
self.__etypes = etypes
self.__record = []
self.__loop = QEventLoop()
self.__timer = QTimer(self, singleShot=True)
self.__timer.timeout.connect(self.__loop.quit)
self.__object.installEventFilter(self)
def wait(self, timeout=5000):
"""
        Start an event loop that runs until a spied event or a timeout occurs.
Parameters
----------
timeout : int
Timeout in milliseconds.
Returns
-------
res : bool
True if the event occurred and False otherwise.
Example
-------
>>> app = QCoreApplication.instance() or QCoreApplication([])
>>> obj = QObject()
>>> spy = EventSpy(obj, QEvent.User)
>>> app.postEvent(obj, QEvent(QEvent.User))
>>> spy.wait()
True
>>> print(spy.events())
[1000]
"""
count = len(self.__record)
self.__timer.stop()
self.__timer.setInterval(timeout)
self.__timer.start()
self.__loop.exec_()
self.__timer.stop()
return len(self.__record) != count
def eventFilter(self, reciever: QObject, event: QEvent) -> bool:
if reciever is self.__object and event.type() in self.__etypes:
self.__record.append(event.type())
if self.__loop.isRunning():
self.__loop.quit()
return super().eventFilter(reciever, event)
def events(self) -> List[QEvent.Type]:
"""
Return a list of all (listened to) event types that occurred.
Returns
-------
events : List[QEvent.Type]
"""
return list(self.__record)
| gpl-3.0 | 5,709,660,334,887,075,000 | 26.835052 | 80 | 0.554815 | false |
Nicoretti/libslack | libslack/tests/__init__.py | 1 | 1485 | #!/usr/bin/env python3
#
# Copyright (c) 2015, Nicola Coretti
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = "0.5.0"
__author__ = 'Nicola Coretti'
__email__ = '[email protected]'
__all__ = ['slackapi_tests']
| bsd-2-clause | -2,966,026,265,261,736,000 | 50.206897 | 81 | 0.767677 | false |
AlexMathew/csipy-exercises | solution/words.py | 1 | 1277 | import sys
def setup(words):
new_words = []
for word in words:
new_words.append(word.lower())
words = new_words
# This could have been done easier with list comprehensions.
# words = [word.lower() for word in words]
wordset = set()
wordcount = dict()
for word in words:
prev_size = len(wordset)
wordset.add(word)
new_size = len(wordset)
if new_size > prev_size:
wordcount[word] = words.count(word)
return wordset, wordcount
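# For illustration (hypothetical input): setup(["A", "a", "b"]) returns
# ({"a", "b"}, {"a": 2, "b": 1}): words are lowercased, the set holds the
# unique words, and the dict maps each unique word to its occurrence count.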
def main():
if len(sys.argv) == 1 or len(sys.argv) > 2:
print 'FORMAT : python words.py --count|--set'
sys.exit(0)
# This could have been done by using exception handlers for IndexError.
option = sys.argv[1]
if option not in ['--count', '--set']:
print 'FORMAT : python words.py --count|--set'
sys.exit(0)
try:
with open('input.txt', 'r') as f:
text = f.read()
    except Exception:
        print 'Rename one of the two files there as input.txt'
        sys.exit(0)
words = text.split()
wordset, wordcount = setup(words)
if option == '--set':
content = " ".join(sorted(list(wordset)))
with open('output.txt', 'w') as f:
f.write(content)
elif option == '--count':
content = " ".join(sorted(wordcount, key=wordcount.get, reverse=True))
with open('output.txt', 'w') as f:
f.write(content)
if __name__ == '__main__':
main() | mit | 7,769,703,426,424,290,000 | 26.782609 | 73 | 0.651527 | false |
inf0-warri0r/music_cat | classifier/classifier.py | 1 | 4355 | #!/usr/bin/env python
"""
Author : tharindra galahena (inf0_warri0r)
Project: classifying music using neural network
Blog : http://www.inf0warri0r.blogspot.com
Date : 23/05/2013
License:
Copyright 2013 Tharindra Galahena
This is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version. This is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
* You should have received a copy of the GNU General Public License along with
this. If not, see http://www.gnu.org/licenses/.
"""
from PySide import QtCore, QtGui
from classify import Ui_classifier
import os
import sys
import file_read
import histograme
import thread
import neural_net
import plot
class MyWidget(QtGui.QMainWindow, Ui_classifier):
def __init__(self, parent=None):
super(MyWidget, self).__init__(parent)
self.setupUi(self)
self.file_name = ""
self.hist_u = list()
self.hist_n = list()
self.net = neural_net.neural(10, 1, 3, 15, 0.001, 0.0)
self.net.init()
self.net.put_weights(self.load())
self.img = ""
self.convert.clicked.connect(self.convert_file)
self.classify.clicked.connect(self.classify_func)
self.browse.clicked.connect(self.browse_func)
self.hist_lable.setScaledContents(True)
self.run = True
self.timer = QtCore.QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.re_draw)
self.timer.start()
def browse_func(self):
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
self.music_file.setText(str(fname))
def re_draw(self):
if not self.run:
QtGui.QMessageBox.about(self, "Done", "Done !!!")
self.run = True
return 0
def convert_file(self):
r, w = os.pipe()
self.file_name = self.music_file.text()
if self.file_name == "":
            QtGui.QMessageBox.about(self, "ERROR", "invalid file")
return 0
pid = os.fork()
if pid:
os.waitpid(pid, 0)
else:
os.execlp("ffmpeg", "ffmpeg", "-i",
self.file_name, "-y", "out.aif")
exit(0)
try:
thread.start_new_thread(self.thread_func, ())
except Exception:
QtGui.QMessageBox.about(self, "ERROR", "thread error")
def thread_func(self):
self.run = True
f = file_read.file_read(("out.aif", "out.aif"))
f.convert()
f.save("./")
self.image = f.image
h = histograme.histograme(f.image)
h.create_histograme()
self.hist_u = h.unnormaliced_histograme()
self.hist_n = h.normalice_histograme()
print "done"
self.run = False
def classify_func(self):
p = plot.plot(self.hist_u, 600, 400, (256, 125, 0), (256, 256, 256))
p.set_scales()
p.set_plot()
p.draw("hist.jpg")
qimage = QtGui.QImage("out.aif.jpg")
pix = QtGui.QPixmap.fromImage(qimage)
self.label.setPixmap(pix)
qimage = QtGui.QImage("hist.jpg")
pix = QtGui.QPixmap.fromImage(qimage)
self.hist_lable.setPixmap(pix)
try:
thread.start_new_thread(self.thread_func2, ())
except Exception:
QtGui.QMessageBox.about(self, "ERROR", "thread error")
def thread_func2(self):
print self.hist_n
out = self.net.update(self.hist_n)
print out
self.gener.setText("")
if out[0] < 0.5:
self.type = "Rock"
else:
self.type = "Classic"
self.gener.setText(self.type)
def load(self):
f = open('weights', 'r')
cat = f.read()
f.close()
weights = list()
lst = cat.splitlines()
for i in range(0, len(lst)):
weights.append(float(lst[i]))
return weights
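# Note: the 'weights' file read by MyWidget.load() is expected to hold one
# float per line, e.g. (assumed example):
#   0.5
#   -0.25
#   1.0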
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = MyWidget()
window.show()
sys.exit(app.exec_())
| agpl-3.0 | 809,784,179,446,759,300 | 27.279221 | 78 | 0.595867 | false |
dials/dials | tests/algorithms/indexing/test_non_primitive_basis.py | 1 | 1945 | import pytest
import scitbx.matrix
from cctbx import sgtbx
from cctbx.sgtbx import bravais_types
from dxtbx.model import Crystal, Experiment, ExperimentList
from dials.algorithms.indexing import assign_indices, non_primitive_basis
from dials.array_family import flex
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_detect(space_group_symbol):
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
result = non_primitive_basis.detect(ms.indices())
if sgi.group().conventional_centring_type_symbol() != "P":
assert result is not None
assert isinstance(result, scitbx.matrix.sqr)
assert result.n == (3, 3)
else:
assert result is None
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_correct(space_group_symbol):
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
# the reciprocal matrix
B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
crystal = Crystal(B, sgtbx.space_group())
expts = ExperimentList([Experiment(crystal=crystal)])
refl = flex.reflection_table()
refl["miller_index"] = ms.indices()
refl["rlp"] = B.elems * ms.indices().as_vec3_double()
refl["imageset_id"] = flex.int(len(refl))
refl["xyzobs.mm.value"] = flex.vec3_double(len(refl))
non_primitive_basis.correct(expts, refl, assign_indices.AssignIndicesGlobal())
cs_corrected = expts.crystals()[0].get_crystal_symmetry()
assert cs_corrected.change_of_basis_op_to_primitive_setting().is_identity_op()
assert (
cs.change_of_basis_op_to_primitive_setting().apply(ms.indices())
== refl["miller_index"]
)
| bsd-3-clause | 7,598,806,288,202,229,000 | 37.137255 | 82 | 0.705398 | false |
vijeth-aradhya/coala-bears | bears/js/ESLintBear.py | 1 | 2949 | import json
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.NpmRequirement import NpmRequirement
from coalib.results.Diff import Diff
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='eslint',
use_stdin=True,
use_stderr=True)
class ESLintBear:
"""
Check JavaScript and JSX code for style issues and semantic errors.
Find out more at <http://eslint.org/docs/rules/>.
"""
LANGUAGES = {'JavaScript', 'JSX'}
REQUIREMENTS = {NpmRequirement('eslint', '2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
ASCIINEMA_URL = 'https://asciinema.org/a/38739'
CAN_DETECT = {'Syntax'}
CAN_FIX = {'Formatting'}
severity_map = {2: RESULT_SEVERITY.MAJOR,
1: RESULT_SEVERITY.NORMAL,
0: RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file,
eslint_config: str=''):
"""
:param eslint_config: The location of the .eslintrc config file.
"""
args = (
'--no-ignore',
'--no-color',
'-f=json',
'--stdin',
'--stdin-filename=' + filename,
)
if eslint_config:
args += ('--config', eslint_config)
else:
args += ('--config', config_file)
return args
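    # Illustrative invocation built from the arguments above (for a
    # hypothetical file "src/app.js" with no explicit eslint_config):
    #   eslint --no-ignore --no-color -f=json --stdin \
    #       --stdin-filename=src/app.js --config <generated config file>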
@staticmethod
def generate_config(filename, file):
return '{"extends": "eslint:recommended"}'
def process_output(self, output, filename, file):
if output[1]:
self.warn('While running {0}, some issues were found:'
.format(self.__class__.__name__))
self.warn(output[1])
if not file or not output[0]:
return
output = json.loads(output[0])
lines = ''.join(file)
assert len(output) == 1
for result in output[0]['messages']:
if 'fix' not in result:
diffs = None
else:
fix = result['fix']
start, end = fix['range']
replacement_text = fix['text']
new_output = lines[:start] + replacement_text + lines[end:]
diffs = {filename: Diff.from_string_arrays(
lines.splitlines(True), new_output.splitlines(True))}
origin = (
'{class_name} ({rule})'.format(class_name=type(self).__name__,
rule=result['ruleId'])
if result['ruleId'] is not None else self)
yield Result.from_values(
origin=origin, message=result['message'],
file=filename, line=result['line'], diffs=diffs,
severity=self.severity_map[result['severity']])
| agpl-3.0 | 3,875,000,536,192,884,700 | 31.766667 | 78 | 0.547304 | false |
googleapis/gapic-generator-python | tests/integration/goldens/asset/samples/generated_samples/cloudasset_generated_asset_v1_asset_service_list_feeds_async.py | 1 | 1458 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListFeeds
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-asset
# [START cloudasset_generated_asset_v1_AssetService_ListFeeds_async]
from google.cloud import asset_v1
async def sample_list_feeds():
"""Snippet for list_feeds"""
# Create a client
client = asset_v1.AssetServiceAsyncClient()
# Initialize request argument(s)
request = asset_v1.ListFeedsRequest(
)
# Make the request
response = await client.list_feeds(request=request)
# Handle response
print("{}".format(response))
# [END cloudasset_generated_asset_v1_AssetService_ListFeeds_async]
| apache-2.0 | -61,573,664,840,941,890 | 30.695652 | 85 | 0.741427 | false |
bc-python-tools/mistool | test/python/dict/test_dictvalues.py | 1 | 1422 | #!/usr/bin/env python3
# --------------------- #
# -- SEVERAL IMPORTS -- #
# --------------------- #
from pathlib import Path
from pytest import fixture
from orpyste.data import ReadBlock as READ
# ------------------- #
# -- MODULE TESTED -- #
# ------------------- #
from mistool import python_use
# ----------------------- #
# -- GENERAL CONSTANTS -- #
# ----------------------- #
THIS_DIR = Path(__file__).parent
DICT_VALUES_FUNCTION = python_use.dictvalues
# ----------------------- #
# -- DATAS FOR TESTING -- #
# ----------------------- #
THE_DATAS_FOR_TESTING = READ(
content = THIS_DIR / 'dictvalues.txt',
mode = {"keyval:: =": ":default:"}
)
@fixture(scope="module")
def or_datas(request):
THE_DATAS_FOR_TESTING.build()
def remove_extras():
THE_DATAS_FOR_TESTING.remove_extras()
request.addfinalizer(remove_extras)
# ------------- #
# -- QUOTING -- #
# ------------- #
def test_python_use_dictvalues(or_datas):
tests = THE_DATAS_FOR_TESTING.mydict("std nosep nonb")
for testname, infos in tests.items():
onedict = infos['onedict']
onedict = eval(onedict)
singlevalues_wanted = infos['singlevalues']
singlevalues_wanted = eval(singlevalues_wanted)
singlevalues_found = DICT_VALUES_FUNCTION(onedict)
singlevalues_found = sorted(singlevalues_found)
assert singlevalues_wanted == singlevalues_found
| gpl-3.0 | 5,746,021,502,840,901,000 | 20.876923 | 58 | 0.549226 | false |
priseborough/ardupilot | Tools/ardupilotwaf/ardupilotwaf.py | 1 | 14418 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
from waflib import Build, ConfigSet, Configure, Context, Errors, Logs, Options, Utils
from waflib.Configure import conf
from waflib.Scripting import run_command
from waflib.TaskGen import before_method, feature
import os.path, os
from collections import OrderedDict
import ap_persistent
SOURCE_EXTS = [
'*.S',
'*.c',
'*.cpp',
]
COMMON_VEHICLE_DEPENDENT_LIBRARIES = [
'AP_AccelCal',
'AP_ADC',
'AP_AHRS',
'AP_Airspeed',
'AP_Baro',
'AP_BattMonitor',
'AP_BoardConfig',
'AP_Common',
'AP_Compass',
'AP_Declination',
'AP_GPS',
'AP_HAL',
'AP_HAL_Empty',
'AP_InertialSensor',
'AP_Math',
'AP_Mission',
'AP_NavEKF2',
'AP_NavEKF3',
'AP_Notify',
'AP_OpticalFlow',
'AP_Param',
'AP_Rally',
'AP_RangeFinder',
'AP_Scheduler',
'AP_SerialManager',
'AP_Terrain',
'AP_Vehicle',
'AP_InternalError',
'AP_Logger',
'Filter',
'GCS_MAVLink',
'RC_Channel',
'SRV_Channel',
'StorageManager',
'AP_Tuning',
'AP_RPM',
'AP_RSSI',
'AP_Mount',
'AP_Module',
'AP_Button',
'AP_ICEngine',
'AP_Frsky_Telem',
'AP_FlashStorage',
'AP_Relay',
'AP_ServoRelayEvents',
'AP_Volz_Protocol',
'AP_SBusOut',
'AP_IOMCU',
'AP_Parachute',
'AP_RAMTRON',
'AP_RCProtocol',
'AP_Radio',
'AP_TempCalibration',
'AP_VisualOdom',
'AP_BLHeli',
'AP_ROMFS',
'AP_Proximity',
'AP_Gripper',
'AP_RTC',
'AC_Sprayer',
'AC_Fence',
'AC_Avoidance',
'AP_LandingGear',
'AP_RobotisServo',
'AP_ToshibaCAN',
]
def get_legacy_defines(sketch_name):
return [
'APM_BUILD_DIRECTORY=APM_BUILD_' + sketch_name,
'SKETCH="' + sketch_name + '"',
'SKETCHNAME="' + sketch_name + '"',
]
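# For example, get_legacy_defines('ArduCopter') evaluates to:
#   ['APM_BUILD_DIRECTORY=APM_BUILD_ArduCopter',
#    'SKETCH="ArduCopter"',
#    'SKETCHNAME="ArduCopter"']
# ('ArduCopter' is only an illustrative sketch name here.)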
IGNORED_AP_LIBRARIES = [
'doc',
'AP_Scripting', # this gets explicitly included when it is needed and should otherwise never be globbed in
]
def ap_autoconfigure(execute_method):
"""
Decorator that enables context commands to run *configure* as needed.
"""
def execute(self):
"""
Wraps :py:func:`waflib.Context.Context.execute` on the context class
"""
if not Configure.autoconfig:
return execute_method(self)
# Disable autoconfig so waf's version doesn't run (and don't end up on loop of bad configure)
Configure.autoconfig = False
if self.variant == '':
raise Errors.WafError('The project is badly configured: run "waf configure" again!')
env = ConfigSet.ConfigSet()
do_config = False
try:
p = os.path.join(Context.out_dir, Build.CACHE_DIR, self.variant + Build.CACHE_SUFFIX)
env.load(p)
except EnvironmentError:
raise Errors.WafError('The project is not configured for board {0}: run "waf configure --board {0} [...]" first!'.format(self.variant))
lock_env = ConfigSet.ConfigSet()
try:
lock_env.load(os.path.join(Context.top_dir, Options.lockfile))
except EnvironmentError:
Logs.warn('Configuring the project')
do_config = True
else:
if lock_env.run_dir != Context.run_dir:
do_config = True
else:
h = 0
for f in env.CONFIGURE_FILES:
try:
h = Utils.h_list((h, Utils.readf(f, 'rb')))
except EnvironmentError:
do_config = True
break
else:
do_config = h != env.CONFIGURE_HASH
if do_config:
cmd = lock_env.config_cmd or 'configure'
tmp = Options.options.__dict__
if env.OPTIONS and sorted(env.OPTIONS.keys()) == sorted(tmp.keys()):
Options.options.__dict__ = env.OPTIONS
else:
raise Errors.WafError('The project configure options have changed: run "waf configure" again!')
try:
run_command(cmd)
finally:
Options.options.__dict__ = tmp
run_command(self.cmd)
else:
return execute_method(self)
return execute
def ap_configure_post_recurse():
post_recurse_orig = Configure.ConfigurationContext.post_recurse
def post_recurse(self, node):
post_recurse_orig(self, node)
self.all_envs[self.variant].CONFIGURE_FILES = self.files
self.all_envs[self.variant].CONFIGURE_HASH = self.hash
return post_recurse
@conf
def ap_get_all_libraries(bld):
if bld.env.BOOTLOADER:
# we don't need the full set of libraries for the bootloader build
return ['AP_HAL']
libraries = []
for lib_node in bld.srcnode.ant_glob('libraries/*', dir=True, src=False):
name = lib_node.name
if name in IGNORED_AP_LIBRARIES:
continue
if name.startswith('AP_HAL'):
continue
if name == 'SITL':
continue
libraries.append(name)
libraries.extend(['AP_HAL', 'AP_HAL_Empty'])
return libraries
@conf
def ap_common_vehicle_libraries(bld):
libraries = COMMON_VEHICLE_DEPENDENT_LIBRARIES
if bld.env.DEST_BINFMT == 'pe':
libraries += [
'AC_Fence',
'AC_AttitudeControl',
]
return libraries
_grouped_programs = {}
@conf
def ap_program(bld,
program_groups='bin',
program_dir=None,
use_legacy_defines=True,
program_name=None,
**kw):
if 'target' in kw:
bld.fatal('Do not pass target for program')
if 'defines' not in kw:
kw['defines'] = []
if 'source' not in kw:
kw['source'] = bld.path.ant_glob(SOURCE_EXTS)
if not program_name:
program_name = bld.path.name
if use_legacy_defines:
kw['defines'].extend(get_legacy_defines(bld.path.name))
kw['cxxflags'] = kw.get('cxxflags', []) + ['-include', 'ap_config.h']
kw['features'] = kw.get('features', []) + bld.env.AP_PROGRAM_FEATURES
program_groups = Utils.to_list(program_groups)
if not program_dir:
program_dir = program_groups[0]
name = os.path.join(program_dir, program_name)
tg_constructor = bld.program
if bld.env.AP_PROGRAM_AS_STLIB:
tg_constructor = bld.stlib
else:
if bld.env.STATIC_LINKING:
kw['features'].append('static_linking')
tg = tg_constructor(
target='#%s' % name,
name=name,
program_name=program_name,
program_dir=program_dir,
**kw
)
if 'use' in kw and bld.env.STATIC_LINKING:
# ensure we link against vehicle library
tg.env.STLIB += [kw['use']]
for group in program_groups:
_grouped_programs.setdefault(group, []).append(tg)
@conf
def ap_example(bld, **kw):
kw['program_groups'] = 'examples'
ap_program(bld, use_legacy_defines=False, **kw)
def unique_list(items):
'''remove duplicate elements from a list while maintaining ordering'''
return list(OrderedDict.fromkeys(items))
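# e.g. unique_list(['b', 'a', 'b', 'c', 'a']) -> ['b', 'a', 'c']
# (first-seen order is preserved)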
@conf
def ap_stlib(bld, **kw):
if 'name' not in kw:
bld.fatal('Missing name for ap_stlib')
if 'ap_vehicle' not in kw:
bld.fatal('Missing ap_vehicle for ap_stlib')
if 'ap_libraries' not in kw:
bld.fatal('Missing ap_libraries for ap_stlib')
kw['ap_libraries'] = unique_list(kw['ap_libraries'] + bld.env.AP_LIBRARIES)
for l in kw['ap_libraries']:
bld.ap_library(l, kw['ap_vehicle'])
kw['features'] = kw.get('features', []) + ['cxx', 'cxxstlib']
kw['target'] = kw['name']
kw['source'] = []
bld.stlib(**kw)
_created_program_dirs = set()
@feature('cxxstlib', 'cxxprogram')
@before_method('process_rule')
def ap_create_program_dir(self):
if not hasattr(self, 'program_dir'):
return
if self.program_dir in _created_program_dirs:
return
self.bld.bldnode.make_node(self.program_dir).mkdir()
_created_program_dirs.add(self.program_dir)
@feature('cxxstlib')
@before_method('process_rule')
def ap_stlib_target(self):
if self.target.startswith('#'):
self.target = self.target[1:]
self.target = '#%s' % os.path.join('lib', self.target)
@conf
def ap_find_tests(bld, use=[]):
if not bld.env.HAS_GTEST:
return
features = []
if bld.cmd == 'check':
features.append('test')
use = Utils.to_list(use)
use.append('GTEST')
includes = [bld.srcnode.abspath() + '/tests/']
for f in bld.path.ant_glob(incl='*.cpp'):
ap_program(
bld,
features=features,
includes=includes,
source=[f],
use=use,
program_name=f.change_ext('').name,
program_groups='tests',
use_legacy_defines=False,
cxxflags=['-Wno-undef'],
)
_versions = []
@conf
def ap_version_append_str(ctx, k, v):
ctx.env['AP_VERSION_ITEMS'] += [(k, '"{}"'.format(os.environ.get(k, v)))]
@conf
def ap_version_append_int(ctx, k, v):
ctx.env['AP_VERSION_ITEMS'] += [(k,v)]
@conf
def write_version_header(ctx, tgt):
with open(tgt, 'w') as f:
print(
'''// auto-generated header, do not edit
#pragma once
#ifndef FORCE_VERSION_H_INCLUDE
#error ap_version.h should never be included directly. You probably want to include AP_Common/AP_FWVersion.h
#endif
''', file=f)
for k, v in ctx.env['AP_VERSION_ITEMS']:
print('#define {} {}'.format(k, v), file=f)
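# Illustrative output (hypothetical keys, assuming they are not set in the
# environment): after ctx.ap_version_append_str('MY_NAME', 'dev') and
# ctx.ap_version_append_int('MY_NUM', 3), the generated header ends with:
#   #define MY_NAME "dev"
#   #define MY_NUM 3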
@conf
def ap_find_benchmarks(bld, use=[]):
if not bld.env.HAS_GBENCHMARK:
return
includes = [bld.srcnode.abspath() + '/benchmarks/']
for f in bld.path.ant_glob(incl='*.cpp'):
ap_program(
bld,
features=['gbenchmark'],
includes=includes,
source=[f],
use=use,
program_name=f.change_ext('').name,
program_groups='benchmarks',
use_legacy_defines=False,
)
def test_summary(bld):
from io import BytesIO
import sys
if not hasattr(bld, 'utest_results'):
Logs.info('check: no test run')
return
fails = []
for filename, exit_code, out, err in bld.utest_results:
Logs.pprint('GREEN' if exit_code == 0 else 'YELLOW',
' %s' % filename,
'returned %d' % exit_code)
if exit_code != 0:
fails.append(filename)
elif not bld.options.check_verbose:
continue
if len(out):
buf = BytesIO(out)
for line in buf:
print(" OUT: %s" % line.decode(), end='', file=sys.stderr)
print()
if len(err):
buf = BytesIO(err)
for line in buf:
print(" ERR: %s" % line.decode(), end='', file=sys.stderr)
print()
if not fails:
Logs.info('check: All %u tests passed!' % len(bld.utest_results))
return
Logs.error('check: %u of %u tests failed' %
(len(fails), len(bld.utest_results)))
for filename in fails:
Logs.error(' %s' % filename)
bld.fatal('check: some tests failed')
_build_commands = {}
def _process_build_command(bld):
if bld.cmd not in _build_commands:
return
params = _build_commands[bld.cmd]
targets = params['targets']
if targets:
if bld.targets:
bld.targets += ',' + targets
else:
bld.targets = targets
program_group_list = Utils.to_list(params['program_group_list'])
bld.options.program_group.extend(program_group_list)
def build_command(name,
targets=None,
program_group_list=[],
doc='build shortcut'):
_build_commands[name] = dict(
targets=targets,
program_group_list=program_group_list,
)
class context_class(Build.BuildContext):
cmd = name
context_class.__doc__ = doc
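# Illustrative use from a wscript (hypothetical command name):
#   build_command('examples-all',
#                 program_group_list='examples',
#                 doc='build every example program')
# registers a `waf examples-all` command that selects all programs in the
# 'examples' program group.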
def _select_programs_from_group(bld):
groups = bld.options.program_group
if not groups:
if bld.targets:
groups = []
else:
groups = ['bin']
if 'all' in groups:
groups = _grouped_programs.keys()
for group in groups:
if group not in _grouped_programs:
bld.fatal('Group %s not found' % group)
tg = _grouped_programs[group][0]
if bld.targets:
bld.targets += ',' + tg.name
else:
bld.targets = tg.name
for tg in _grouped_programs[group][1:]:
bld.targets += ',' + tg.name
def options(opt):
opt.ap_groups = {
'configure': opt.add_option_group('Ardupilot configure options'),
'linux': opt.add_option_group('Linux boards configure options'),
'build': opt.add_option_group('Ardupilot build options'),
'check': opt.add_option_group('Ardupilot check options'),
'clean': opt.add_option_group('Ardupilot clean options'),
}
g = opt.ap_groups['build']
g.add_option('--program-group',
action='append',
default=[],
help='''Select all programs that go in <PROGRAM_GROUP>/ for the build.
Example: `waf --program-group examples` builds all examples. The
special group "all" selects all programs.
''')
g.add_option('--upload',
action='store_true',
help='''Upload applicable targets to a connected device. Not all
platforms may support this. Example: `waf copter --upload` means "build
arducopter and upload it to my board".
''')
g = opt.ap_groups['check']
g.add_option('--check-verbose',
action='store_true',
help='Output all test programs.')
g = opt.ap_groups['clean']
g.add_option('--clean-all-sigs',
action='store_true',
help='''Clean signatures for all tasks. By default, tasks that scan for
implicit dependencies (like the compilation tasks) keep the dependency
information across clean commands, so that that information is changed
only when really necessary. Also, some tasks that don't really produce
files persist their signature. This option avoids that behavior when
cleaning the build.
''')
def build(bld):
bld.add_pre_fun(_process_build_command)
bld.add_pre_fun(_select_programs_from_group)
| gpl-3.0 | 3,844,010,442,105,174,500 | 26.358634 | 147 | 0.578027 | false |
usc-isi/horizon-old | horizon/horizon/dashboards/settings/user/urls.py | 1 | 1066 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(
template_name='settings/user/settings.html'),
name='index'))
| apache-2.0 | -3,605,575,973,021,152,000 | 37.071429 | 78 | 0.722326 | false |
codelucas/facebook-context | backend-flask/backend/example.py | 1 | 15518 | #!/usr/bin/env python
# Copyright 2013 AlchemyAPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from alchemyapi import AlchemyAPI
import json
demo_text = 'Yesterday dumb Bob destroyed my fancy iPhone in beautiful Denver, Colorado. I guess I will have to head over to the Apple Store and buy a new one.'
demo_url = 'http://www.npr.org/2013/11/26/247336038/dont-stuff-the-turkey-and-other-tips-from-americas-test-kitchen'
demo_html = '<html><head><title>Python Demo | AlchemyAPI</title></head><body><h1>Did you know that AlchemyAPI works on HTML?</h1><p>Well, you do now.</p></body></html>'
print('')
print('')
print(' , ')
print(' .I7777~ ')
print(' .I7777777 ')
print(' +. 77777777 ')
print(' =???, I7777777= ')
print('=?????? 7777777? ,:::===? ')
print('=???????. 777777777777777777~ .77: ?? :7 =$, :$$$$$$+ =$? ')
print(' ????????: .777777777777777777 II77 ?? :7 $$7 :$? 7$7 =$? ')
print(' .???????= +7777777777777777 .7 =7: ?? :7777+ :7:I777? ?777I= 77~777? ,777I I7 77 +$?$: :$? $$ =$? ')
print(' ???????+ ~777???+===::: :7+ ~7 ?? .77 +7 :7?. II 7~ ,I7 77+ I77 ~7 ?7 =7: .$, =$ :$? ,$$? =$? ')
print(' ,???????~ 77 7: ?? ?I. 7 :7 :7 ~7 7 77 =7: 7 7 7~ 7$ $= :$$$$$$~ =$? ')
print(' .??????? ,???I77777777777~ :77777777~ ?? 7: :7 :7 777777777:77 =7 7 +7 ~7 $$$$$$$$I :$? =$? ')
print(' .??????? ,7777777777777777 7= 77 ?? I+ 7 :7 :7 ?? 7,77 =7 7 7~ 7, =$7 $$, :$? =$? ')
print(' .???????. I77777777777777777 +7 ,7??? 77 I7 :7 :7 7~ .?7 77 =7 7 ,77I $+ 7$ :$? =$? ')
print(' ,???????= :77777777777777777~ 7= ~7?? ~I77777 :7 :7 ,777777. 77 =7 7 77, +$ .$::$? =$? ')
print(',??????? :7777777 77 ')
print(' =????? ,7777777 77= ')
print(' +?+ 7777777? ')
print(' + ~7777777 ')
print(' I777777 ')
print(' :~ ')
#Create the AlchemyAPI Object
alchemyapi = AlchemyAPI()
print('')
print('')
print('############################################')
print('# Entity Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.entities('text',demo_text, { 'sentiment':1 })
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Entities ##')
for entity in response['entities']:
print('text: ', entity['text'].encode('utf-8'))
print('type: ', entity['type'])
print('relevance: ', entity['relevance'])
print('sentiment: ', entity['sentiment']['type'])
if 'score' in entity['sentiment']:
print('sentiment score: ' + entity['sentiment']['score'])
print('')
else:
print('Error in entity extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Keyword Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.keywords('text',demo_text, { 'sentiment':1 })
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Keywords ##')
for keyword in response['keywords']:
print('text: ', keyword['text'].encode('utf-8'))
print('relevance: ', keyword['relevance'])
print('sentiment: ', keyword['sentiment']['type'])
if 'score' in keyword['sentiment']:
print('sentiment score: ' + keyword['sentiment']['score'])
print('')
else:
print('Error in keyword extaction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Concept Tagging Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.concepts('text',demo_text)
if response['status'] == 'OK':
print('## Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Concepts ##')
for concept in response['concepts']:
print('text: ', concept['text'])
print('relevance: ', concept['relevance'])
print('')
else:
print('Error in concept tagging call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('Processing html: ', demo_html)
print('')
response = alchemyapi.sentiment('html',demo_html)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Document Sentiment ##')
print('type: ', response['docSentiment']['type'])
if 'score' in response['docSentiment']:
print('score: ', response['docSentiment']['score'])
else:
print('Error in sentiment analysis call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Targeted Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.sentiment_targeted('text',demo_text, 'Denver')
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Targeted Sentiment ##')
print('type: ', response['docSentiment']['type'])
if 'score' in response['docSentiment']:
print('score: ', response['docSentiment']['score'])
else:
print('Error in targeted sentiment analysis call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Text Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.text('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Text ##')
print('text: ', response['text'].encode('utf-8'))
print('')
else:
print('Error in text extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Author Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.author('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Author ##')
print('author: ', response['author'].encode('utf-8'))
print('')
else:
print('Error in author extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Language Detection Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.language('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Language ##')
print('language: ', response['language'])
print('iso-639-1: ', response['iso-639-1'])
print('native speakers: ', response['native-speakers'])
print('')
else:
print('Error in language detection call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Title Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.title('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Title ##')
print('title: ', response['title'].encode('utf-8'))
print('')
else:
print('Error in title extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Relation Extraction Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.relations('text',demo_text)
if response['status'] == 'OK':
print('## Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Relations ##')
for relation in response['relations']:
if 'subject' in relation:
print('Subject: ', relation['subject']['text'].encode('utf-8'))
if 'action' in relation:
print('Action: ', relation['action']['text'].encode('utf-8'))
if 'object' in relation:
print('Object: ', relation['object']['text'].encode('utf-8'))
print('')
else:
print('Error in relation extaction call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Text Categorization Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.category('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Category ##')
print('text: ', response['category'])
print('score: ', response['score'])
print('')
else:
print('Error in text categorization call: ', response['statusInfo'])
print('')
print('')
print('')
print('############################################')
print('# Feed Detection Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.feeds('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Feeds ##')
for feed in response['feeds']:
print('feed: ', feed['feed'])
else:
print('Error in feed detection call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Microformats Parsing Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.microformats('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Microformats ##')
for microformat in response['microformats']:
print('Field: ', microformat['field'].encode('utf-8'))
print('Data: ', microformat['data'])
print('')
else:
print('Error in microformats parsing call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Image Extraction Example #')
print('############################################')
print('')
print('')
print('Processing url: ', demo_url)
print('')
response = alchemyapi.imageExtraction('url',demo_url)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Image ##')
print('Image: ', response['image'])
print('')
else:
print('Error in image extraction call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Taxonomy Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.taxonomy('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Categories ##')
for category in response['taxonomy']:
print(category['label'], ' : ', category['score'])
print('')
else:
print('Error in taxonomy call: ', response['statusInfo'])
print('')
print('')
print('')
print('')
print('')
print('############################################')
print('# Combined Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.combined('text',demo_text)
if response['status'] == 'OK':
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('## Keywords ##')
for keyword in response['keywords']:
print(keyword['text'], ' : ', keyword['relevance'])
print('')
print('## Concepts ##')
for concept in response['concepts']:
print(concept['text'], ' : ', concept['relevance'])
print('')
print('## Entities ##')
for entity in response['entities']:
print(entity['type'], ' : ', entity['text'], ', ', entity['relevance'])
print(' ')
else:
print('Error in combined call: ', response['statusInfo'])
print('')
print('')
| apache-2.0 | -368,163,000,386,899,300 | 27.163339 | 168 | 0.47345 | false |
oblique-labs/pyVM | rpython/rlib/unicodedata/test/test_unicodedata.py | 1 | 6594 | # encoding: utf-8
import random
import unicodedata
import py
from rpython.rlib.unicodedata import (
unicodedb_3_2_0, unicodedb_5_2_0, unicodedb_6_0_0, unicodedb_6_2_0,
unicodedb_8_0_0)
class TestUnicodeData(object):
def setup_class(cls):
if unicodedata.unidata_version != '5.2.0':
py.test.skip('Needs python with unicode 5.2.0 database.')
seed = random.getrandbits(32)
print "random seed: ", seed
random.seed(seed)
cls.charlist = charlist = []
cls.nocharlist = nocharlist = []
while len(charlist) < 1000 or len(nocharlist) < 1000:
chr = unichr(random.randrange(65536))
try:
charlist.append((chr, unicodedata.name(chr)))
except ValueError:
nocharlist.append(chr)
def test_random_charnames(self):
for chr, name in self.charlist:
assert unicodedb_5_2_0.name(ord(chr)) == name
assert unicodedb_5_2_0.lookup(name) == ord(chr)
def test_random_missing_chars(self):
for chr in self.nocharlist:
py.test.raises(KeyError, unicodedb_5_2_0.name, ord(chr))
def test_isprintable(self):
assert unicodedb_5_2_0.isprintable(ord(' '))
assert unicodedb_5_2_0.isprintable(ord('a'))
assert not unicodedb_5_2_0.isprintable(127)
assert unicodedb_5_2_0.isprintable(0x00010346) # GOTHIC LETTER FAIHU
assert unicodedb_5_2_0.isprintable(0xfffd) # REPLACEMENT CHARACTER
assert unicodedb_5_2_0.isprintable(0xfffd) # REPLACEMENT CHARACTER
assert not unicodedb_5_2_0.isprintable(0xd800) # SURROGATE
assert not unicodedb_5_2_0.isprintable(0xE0020) # TAG SPACE
def test_identifier(self):
assert unicodedb_5_2_0.isxidstart(ord('A'))
assert not unicodedb_5_2_0.isxidstart(ord('_'))
assert not unicodedb_5_2_0.isxidstart(ord('0'))
assert not unicodedb_5_2_0.isxidstart(ord('('))
assert unicodedb_5_2_0.isxidcontinue(ord('A'))
assert unicodedb_5_2_0.isxidcontinue(ord('_'))
assert unicodedb_5_2_0.isxidcontinue(ord('0'))
assert not unicodedb_5_2_0.isxidcontinue(ord('('))
oc = ord(u'日')
assert unicodedb_5_2_0.isxidstart(oc)
def test_compare_functions(self):
def getX(fun, code):
try:
return getattr(unicodedb_5_2_0, fun)(code)
except KeyError:
return -1
for code in range(0x10000):
char = unichr(code)
assert unicodedata.digit(char, -1) == getX('digit', code)
assert unicodedata.numeric(char, -1) == getX('numeric', code)
assert unicodedata.decimal(char, -1) == getX('decimal', code)
assert unicodedata.category(char) == unicodedb_5_2_0.category(code)
assert unicodedata.bidirectional(char) == unicodedb_5_2_0.bidirectional(code)
assert unicodedata.decomposition(char) == unicodedb_5_2_0.decomposition(code)
assert unicodedata.mirrored(char) == unicodedb_5_2_0.mirrored(code)
assert unicodedata.combining(char) == unicodedb_5_2_0.combining(code)
def test_compare_methods(self):
for code in range(0x10000):
char = unichr(code)
assert char.isalnum() == unicodedb_5_2_0.isalnum(code)
assert char.isalpha() == unicodedb_5_2_0.isalpha(code)
assert char.isdecimal() == unicodedb_5_2_0.isdecimal(code)
assert char.isdigit() == unicodedb_5_2_0.isdigit(code)
assert char.islower() == unicodedb_5_2_0.islower(code)
assert char.isnumeric() == unicodedb_5_2_0.isnumeric(code)
assert char.isspace() == unicodedb_5_2_0.isspace(code), hex(code)
assert char.istitle() == (unicodedb_5_2_0.isupper(code) or unicodedb_5_2_0.istitle(code)), code
assert char.isupper() == unicodedb_5_2_0.isupper(code)
assert char.lower() == unichr(unicodedb_5_2_0.tolower(code))
assert char.upper() == unichr(unicodedb_5_2_0.toupper(code))
assert char.title() == unichr(unicodedb_5_2_0.totitle(code)), hex(code)
def test_hangul_difference_520(self):
assert unicodedb_5_2_0.name(40874) == 'CJK UNIFIED IDEOGRAPH-9FAA'
def test_differences(self):
assert unicodedb_5_2_0.name(9187) == 'BENZENE RING WITH CIRCLE'
assert unicodedb_5_2_0.lookup('BENZENE RING WITH CIRCLE') == 9187
py.test.raises(KeyError, unicodedb_3_2_0.lookup, 'BENZENE RING WITH CIRCLE')
py.test.raises(KeyError, unicodedb_3_2_0.name, 9187)
def test_casefolding(self):
assert unicodedb_6_2_0.casefold_lookup(223) == [115, 115]
assert unicodedb_6_2_0.casefold_lookup(976) == [946]
assert unicodedb_5_2_0.casefold_lookup(42592) == None
# 1010 has been remove between 3.2.0 and 5.2.0
assert unicodedb_3_2_0.casefold_lookup(1010) == [963]
assert unicodedb_5_2_0.casefold_lookup(1010) == None
# 7838 has been added in 5.2.0
assert unicodedb_3_2_0.casefold_lookup(7838) == None
assert unicodedb_5_2_0.casefold_lookup(7838) == [115, 115]
# Only lookup who cannot be resolved by `lower` are stored in database
assert unicodedb_3_2_0.casefold_lookup(ord('E')) == None
class TestUnicodeData600(object):
def test_some_additions(self):
additions = {
ord(u"\u20B9"): 'INDIAN RUPEE SIGN',
# u'\U0001F37A'
127866: 'BEER MUG',
# u'\U0001F37B'
127867: 'CLINKING BEER MUGS',
# u"\U0001F0AD"
127149: 'PLAYING CARD QUEEN OF SPADES',
# u"\U0002B740"
177984: "CJK UNIFIED IDEOGRAPH-2B740",
}
for un, name in additions.iteritems():
assert unicodedb_6_0_0.name(un) == name
assert unicodedb_6_0_0.isprintable(un)
def test_special_casing(self):
assert unicodedb_6_0_0.tolower_full(ord('A')) == [ord('a')]
# The German es-zed is special--the normal mapping is to SS.
assert unicodedb_6_0_0.tolower_full(ord(u'\xdf')) == [0xdf]
assert unicodedb_6_0_0.toupper_full(ord(u'\xdf')) == map(ord, 'SS')
assert unicodedb_6_0_0.totitle_full(ord(u'\xdf')) == map(ord, 'Ss')
def test_islower(self):
assert unicodedb_6_2_0.islower(0x2177)
class TestUnicodeData800(object):
def test_changed_in_version_8(self):
assert unicodedb_6_2_0.toupper_full(0x025C) == [0x025C]
assert unicodedb_8_0_0.toupper_full(0x025C) == [0xA7AB]
| mit | -4,423,305,646,221,714,000 | 42.946667 | 107 | 0.614381 | false |
dereneaton/ipyrad | ipyrad/core/paramsinfo.py | 1 | 21970 | #!/usr/bin/env python
""" Return explanation and options for each parameter.
ip.get_params_info(1) or ip.get_params_info("project_dir")
return the same result. If no argument is given, a summary of the available
parameters and their numbered references is returned.
Parameter info is stored as a dict of tuples. Each tuple consists of a
short and a long description for each parameter. By default if you ask
for a parameter you'll get the long description.
"""
from __future__ import print_function
from collections import OrderedDict
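# Example usage (illustrative; assumes ipyrad is imported as `ip`):
#
#     ip.get_params_info()                   # summary of all parameters
#     ip.get_params_info(14)                 # long description of clust_threshold
#     ip.get_params_info("clust_threshold")  # same parameter, referenced by name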
pinfo = OrderedDict([
("0", ("""
(0) assembly_name ----------------------------------------------------
This is the name of your assembly. It will be the prefix for all
directories inside the project directory. An easy default for this
parameter is the name of your project directory. For example if your
project directory is ./white-crowns, then your assembly name could be
white-crowns. Assembly name is variable because you might want to
fork assemblies within a project to try different runs with different
minimum coverage values, different levels of indels allowed, etc.
Examples:
----------------------------------------------------------------------
data.set_params('assembly_name', "white-crowns") ## verbose
----------------------------------------------------------------------
""", "Assembly name. Used to name output directories for assembly steps")
),
("1", ("""
(1) project_dir ------------------------------------------------------
Project name / path for working directory where all data files will be
saved. This parameter affects all steps of assembly (1-7).
Examples:
----------------------------------------------------------------------
data.set_params('project_dir', "./") ## verbose
----------------------------------------------------------------------
""", "Project dir (made in curdir if not present)")
),
("2", ("""
(2) raw_fastq_path ---------------------------------------------------
The directory or files (selected with * wildcard selector) in which
FASTQ data files reside. Files can be gzipped. This parameter affects
only step 1 of assembly. Examples:
----------------------------------------------------------------------
data.set_params("raw_fastq_path", "raw/*.fastq.gz") ## verbose
----------------------------------------------------------------------
""", "Location of raw non-demultiplexed fastq files")
),
("3", ("""
(3) barcodes_path ----------------------------------------------------
Path to the barcodes file used in step 1 of assembly for
demultiplexing. If data are already demultiplexed this can be left
blank. This parameter affects only step 1 of assembly. NB: iPyrad
can only handle one barcodes file at a time, so if you have multiple
barcodes files and multiple raw files then you'll need to run each
separately. Examples:
----------------------------------------------------------------------
data.set_params("barcodes_path", "./barcodes.txt") ## verbose
----------------------------------------------------------------------
""", "Location of barcodes file")
),
("4", ("""
(4) sorted_fastq_path ------------------------------------------------
Path to demultiplexed fastq data. If left blank, this is assigned
automatically to <data.name>_fastq/ within the working directory. If your
data are already demultiplexed then you must enter the location of your
data here. Wildcard selectors can be used to select a subsample of files
within a directory, else all files are selected in the directory.
This parameter affects only step 2 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params("sorted_fastq_path", "data/*.gz") ##
----------------------------------------------------------------------
""", "Location of demultiplexed/sorted fastq files")
),
("5", ("""
(5) assembly_method --------------------------------------------------
A string specifying the desired assembly method. There are four
available options for assembly method:
denovo - Denovo assembly is the classic pyrad method, and
it is the <default> unless otherwise specified.
Denovo will cluster and align all reads from scratch
reference - Reference assembly will map and align reads to the
provided reference sequence, which must be specified
                 in parameter 6 (reference_sequence). Strict refer-
ence assembly will throw out all unmapped reads,
which could be a significant proportion depending
on the distance between your reference and study
                 species.
----------------------------------------------------------------------
data.set_params("assembly_method", "denovo") ## verbose
----------------------------------------------------------------------
""", "Assembly method (denovo, reference)")
),
("6", ("""
(6) reference_sequence -----------------------------------------------
The path to the reference sequence you desire to map your reads to.
The reference may be either fasta or gzipped fasta. It should be a
complete reference sequence, including all chromosomes, scaffolds, and
contigs in one huge file (most reference sequences available will be
in this format, especially non-model references). The first time you
attempt to use this sequence it will be indexed (we are using bwa
for reference mapping). This is a time intensive process so expect the
first run to take some time, certainly more than ten minutes, but less
than an hour. If you desire to index the reference yourself you can do
this, but best not to unless you really care about bwa indexing
settings. We chose conservative defaults that have worked well for us
on other projects.
A word on the format of the path (this is important). The path may
either be a full path (desirable) or a path relative to the directory
you are running ipyrad from (supported but be careful of the path).
----------------------------------------------------------------------
data.set_params(6) = /home/wat/data/reference.fa ## set a full path
data.set_params(6) = ./data/reference.fa.gz ## set a relative path
data.set_params("reference_sequence") = ./data/reference.fa ## verbose
----------------------------------------------------------------------
""", "Location of reference sequence file")
),
("7", ("""
(7) datatype ---------------------------------------------------------
Options: rad, gbs, 2brad, ddrad, pairddrad, pairgbs, pair3rad,
This parameter affects all steps of assembly (1-7).
Examples:
----------------------------------------------------------------------
data.set_params(7) = 'rad' ## rad data type
data.set_params(7) = 'gbs' ## gbs data type
data.set_params(7) = 'pairddrad' ## gbs data type
data.set_params("datatype") = 'ddrad' ## verbose
----------------------------------------------------------------------
""", "Datatype (see docs): rad, gbs, ddrad, etc.")
),
("8", ("""
(8) restriction_overhang ---------------------------------------------
A tuple containing one or two restriction overhangs. Single digest
    RADseq with sonication requires only one overhang, all other data
types should have two. The first is used for detecting barcodes, the
second is not required, but is used in filtering, and is needed for
removal from short DNA fragments. This parameter affects steps 1,2,4,5,
and 7 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params(8) = ("TGCAG", "") ## default rad (PstI)
data.set_params(8) = ("CWGC", "CWGC") ## gbs or pairgbs (ApeKI)
data.set_params(8) = ("CAGT", "AATT") ## ddrad (ApeKI, MSI)
data.set_params(8) = ("CAGT", "AATT") ## pairddrad (ApeKI, MSI)
data.set_params("restriction_overhang") = ("CAGT", "AATT") ## verbose
----------------------------------------------------------------------
""", "Restriction overhang (cut1,) or (cut1, cut2)")
),
("9", ("""
(9) max_low_qual_bases -----------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(9) = 10
data.set_params("max_low_qual_bases") = 6
----------------------------------------------------------------------
""", "Max low quality base calls (Q<20) in a read")
),
("10", ("""
(10) phred_Qscore_offset ---------------------------------------------
The threshold at which a base call is considered low quality during
step 2 filtering is determined by the phred_Qscore_offset. The default
offset is 33, which is equivalent to a minimum qscore of 20 (99% call
confidence). Some older data use a qscore offset of 64. You can toggle
the offset number to change the threshold for low qual bases. For
example, reducing the offset to 26 is equivalent to a minimum qscore
of 13, which is approximately 95% probability of a correct base call.
Examples:
----------------------------------------------------------------------
data.set_params(10) = 33
data.set_params("phred_Qscore_offset") = 26 ## 95% confidence
data.set_params("phred_Qscore_offset") = 43 ## 99.9% confidence
data.set_params("phred_Qscore_offset") = 33
----------------------------------------------------------------------
""", "phred Q score offset (33 is default and very standard)")
),
("11", ("""
(11) mindepth_statistical --------------------------------------------
An integer value indicating the mindepth for statistical base calls
based a binomial probability with H and E estimated from the data.
Base calls are made at >= the value entered. For most reasonable
estimates of E and H, statistical base calls cannot be made below 5
or 6, and will instead be called N.
The parameter affects steps 5 and 7 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params(11) = (6, 6) ## only stat base calls down to depth=6
data.set_params(11) = (10, 5) ## stat calls above 9, majrule from 9-5.
data.set_params(11) = (10, 1) ## stat calls above 9, majrule from 9-1.
data.set_params(mindepth_statistical) = 6 ## verbose
----------------------------------------------------------------------
""", "Min depth for statistical base calling")
),
("12", ("""
(12) mindepth_majrule ------------------------------------------------
An integer value indicating the mindepth for majority-rule base calls.
Base calls are made at >= the value entered. It may often be advant-
ageous to use a low value for majrule calls to preserve most data during
assembly within-samples, so that more data is clustered between samples.
Low depth data can be filtered out later from the final data set if needed.
The parameter affects steps 5 and 7 of assembly.
Examples:
----------------------------------------------------------------------
data.set_params(12) = (6, 6) ## only stat base calls down to depth=6
data.set_params(12) = (10, 5) ## stat calls above 9, majrule from 9-5.
data.set_params(12) = (10, 1) ## stat calls above 9, majrule from 9-1.
data.set_params(mindepth_majrule) = 6 ## verbose
----------------------------------------------------------------------
""", "Min depth for majority-rule base calling")
),
("13", ("""
(13) maxdepth --------------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(13) = 33
data.set_params("maxdepth") = 33
----------------------------------------------------------------------
""", "Max cluster depth within samples")
),
("14", ("""
(14) clust_threshold -------------------------------------------------
Clustering threshold.
Examples:
----------------------------------------------------------------------
data.set_params(14) = .85 ## clustering similarity threshold
data.set_params(14) = .90 ## clustering similarity threshold
data.set_params(14) = .95 ## very high values not recommended
data.set_params("clust_threshold") = .83 ## verbose
----------------------------------------------------------------------
""", "Clustering threshold for de novo assembly")
),
("15", ("""
(15) max_barcode_mismatch --------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(15) = 1
data.set_params("max_barcode_mismatch") = 1
----------------------------------------------------------------------
""", "Max number of allowable mismatches in barcodes")
),
("16", ("""
(16) filter_adapters ----------------------------------------------
Examples:
-------------------------------------------------------------------
data.set_params(16) = 1
data.set_params("filter_adapters") = 1
-------------------------------------------------------------------
""", "Filter for adapters/primers (1 or 2=stricter)")
),
("17", ("""
(17) filter_min_trim_len ---------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(17) = 1
data.set_params("filter_min_trim_len") = 1
----------------------------------------------------------------------
""", "Min length of reads after adapter trim")
),
("18", ("""
(18) max_alleles_consens ---------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(18) = 1
data.set_params("max_alleles_consens") = 1
----------------------------------------------------------------------
""", "Max alleles per site in consensus sequences")
),
("19", ("""
(19) max_Ns_consens --------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(19) = 1
data.set_params("max_Ns_consens") = 1
----------------------------------------------------------------------
""", "Max N's (uncalled bases) in consensus")
),
("20", ("""
(20) max_Hs_consens --------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(20) = 1
data.set_params("max_Hs_consens") = 1
----------------------------------------------------------------------
""", "Max Hs (heterozygotes) in consensus")
),
("21", ("""
(21) min_samples_locus -----------------------------------------------
Minimum number of samples a locus must be shared across to be included
in the exported data set following filtering for sequencing depth,
paralogs, ...
Examples
----------------------------------------------------------------------
data.set_params(21) = 4 ## min 4; most inclusive phylo data
data.set_params(21) = 20 ## min 20; less data, less missing
data.set_params(21) = 1 ## min 1; most data, most missing
data.set_params("min_samples_locus") = 4 ## verbose
----------------------------------------------------------------------
""", "Min # samples per locus for output")
),
("22", ("""
(22) max_SNPs_locus --------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(22) = 1
data.set_params("max_SNPs_locus") = 1
----------------------------------------------------------------------
""", "Max # SNPs per locus")
),
("23", ("""
(23) max_Indels_locus ------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params(23) = 1
data.set_params("max_Indels_locus") = 1
----------------------------------------------------------------------
""", "Max # of indels per locus")
),
("24", ("""
(24) max_shared_Hs_locus ---------------------------------------------
...
----------------------------------------------------------------------
data.set_params(24) = .25 ## set as proportion of samples
data.set_params(24) = 4 ## set as number of samples
data.set_params(24) = 9999 ## set arbitrarily high
data.set_params("max_shared_Hs_locus") = 4 ## verbose
----------------------------------------------------------------------
""", "Max # heterozygous sites per locus")
),
("25", ("""
(25) trim_reads -- ---------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params("trim_reads") = (0, -5, 0, 0) ## trims last 5 from R1
data.set_params("trim_reads") = (5, 85, 0, 0) ## trims R1 from 5-85
data.set_params("trim_reads") = (5, 85, 5, 85) ## trims both pairs 5-85
----------------------------------------------------------------------
""", "Trim raw read edges (R1>, <R1, R2>, <R2) (see docs)")
),
("26", ("""
(26) trim_loci -------------------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params("trim_loci") = (0, 5, 5, 0)
----------------------------------------------------------------------
""", "Trim locus edges (see docs) (R1>, <R1, R2>, <R2)")
),
("27", ("""
(27) output_formats --------------------------------------------------
Examples:
----------------------------------------------------------------------
* ## [27] output_formats: * means all formats
vcf, phy, nex ## [27] list subset of formats if you want
----------------------------------------------------------------------
""", "Output formats (see docs)")
),
("28", ("""
(28) pop_assign_file -------------------------------------------------
Examples:
----------------------------------------------------------------------
./popfile.txt ## [28] pop_assign_file
/home/users/Documents/popfile.txt ## [28] pop_assign_file
----------------------------------------------------------------------
""", "Path to population assignment file")
),
("29", ("""
(29) reference_as_filter ---------------------------------------------
Examples:
----------------------------------------------------------------------
data.set_params("reference_as_filter") = ./data/reference.fa ## verbose
----------------------------------------------------------------------
""", "Reads mapped to this reference are removed in step 3")
),
])
def paramname(param=""):
""" Get the param name from the dict index value.
"""
try:
name = pinfo[str(param)][0].strip().split(" ")[1]
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized - ".format(param), err)
raise
return name
def paraminfo(param="", short=False):
""" Returns detailed information for the numbered parameter.
Further information is available in the tutorial.
Unlike params() this function doesn't deal well with *
It only takes one parameter at a time and returns the desc
"""
## If the short flag is set return the short description, otherwise
## return the long.
if short:
desc = 1
else:
desc = 0
try:
description = pinfo[str(param)][desc]
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized - ".format(param), err)
raise
return description
def paramsinfo(param="", short=False):
""" This is the human readable version of the paramsinfo() function.
You give it a param and it prints to stdout.
"""
if short:
desc = 1
else:
desc = 0
if param == "*":
for key in pinfo:
print(pinfo[str(key)][desc])
elif param:
try:
print(pinfo[str(param)][desc])
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized", err)
raise
else:
print("Enter a name or number for explanation of the parameter\n")
for key in pinfo:
print(pinfo[str(key)][desc].split("\n")[1][2:-10])
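## Hedged usage sketch (added for illustration; not part of the original
## module). The calls below only assume the three helpers defined above:
##
## paramsinfo("14") ## prints the full help text for clust_threshold
## paramsinfo("*", short=True) ## prints the short description of every parameter
## paramname("14") ## returns "clust_threshold"
## paraminfo("14", short=True) ## returns "Clustering threshold for de novo assembly"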
if __name__ == "__main__":
pass
| gpl-3.0 | 1,552,804,021,922,965,200 | 44.298969 | 82 | 0.454802 | false |
pshchelo/heat | heat/engine/resources/stack_resource.py | 1 | 20765 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import template_format
from heat.engine import attributes
from heat.engine import environment
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
class StackResource(resource.Resource):
'''
An abstract Resource subclass that allows the management of an entire Stack
as a resource in a parent stack.
'''
# Assume True as this is evaluated before the stack is created
# so there is no way to know for sure without subclass-specific
# template parsing.
requires_deferred_auth = True
def __init__(self, name, json_snippet, stack):
super(StackResource, self).__init__(name, json_snippet, stack)
self._nested = None
self.resource_info = None
def validate(self):
super(StackResource, self).validate()
self.validate_nested_stack()
def validate_nested_stack(self):
try:
name = "%s-%s" % (self.stack.name, self.name)
nested_stack = self._parse_nested_stack(
name,
self.child_template(),
self.child_params())
nested_stack.strict_validate = False
nested_stack.validate()
except AssertionError:
raise
except Exception as ex:
raise exception.StackValidationFailed(
error=_("Failed to validate"),
path=[self.stack.t.get_section_name('resources'), self.name],
message=six.text_type(ex))
def _outputs_to_attribs(self, json_snippet):
outputs = json_snippet.get('Outputs')
if not self.attributes and outputs:
self.attributes_schema = (
attributes.Attributes.schema_from_outputs(outputs))
self.attributes = attributes.Attributes(self.name,
self.attributes_schema,
self._resolve_attribute)
def _needs_update(self, after, before, after_props, before_props,
prev_resource):
# Always issue an update to the nested stack and let the individual
# resources in it decide if they need updating.
return True
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None):
try:
yield super(StackResource, self).update(after, before,
prev_resource)
except StopIteration:
with excutils.save_and_reraise_exception():
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_cancel_update(self.context,
stack_identity)
def nested(self, force_reload=False, show_deleted=False):
'''Return a Stack object representing the nested (child) stack.
:param force_reload: Forces reloading from the DB instead of returning
the locally cached Stack object
:param show_deleted: Returns the stack even if it's been deleted
'''
if force_reload:
self._nested = None
if self._nested is None and self.resource_id is not None:
self._nested = parser.Stack.load(self.context,
self.resource_id,
show_deleted=show_deleted,
force_reload=force_reload)
if self._nested is None:
raise exception.NotFound(_("Nested stack not found in DB"))
return self._nested
def child_template(self):
'''
Default implementation to get the child template.
Resources that inherit from StackResource should override this method
with specific details about the template used by them.
'''
raise NotImplementedError()
def child_params(self):
'''
Default implementation to get the child params.
Resources that inherit from StackResource should override this method
with specific details about the parameters used by them.
'''
raise NotImplementedError()
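    # Hedged sketch (illustrative only, not part of Heat): a concrete
    # subclass would typically override the two hooks above, e.g.
    #
    #     class MyNestedStack(StackResource):
    #         def child_template(self):
    #             # my_template_string is an assumed module-level constant
    #             return template_format.parse(my_template_string)
    #
    #         def child_params(self):
    #             # 'input_param' is an assumed property of the subclass
    #             return {'input_param': self.properties['input_param']}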
def preview(self):
'''
Preview a StackResource as resources within a Stack.
This method overrides the original Resource.preview to return a preview
of all the resources contained in this Stack. For this to be possible,
the specific resources need to override both ``child_template`` and
``child_params`` with specific information to allow the stack to be
parsed correctly. If any of these methods is missing, the entire
StackResource will be returned as if it were a regular Resource.
'''
try:
child_template = self.child_template()
params = self.child_params()
except NotImplementedError:
LOG.warn(_LW("Preview of '%s' not yet implemented"),
self.__class__.__name__)
return self
name = "%s-%s" % (self.stack.name, self.name)
self._nested = self._parse_nested_stack(name, child_template, params)
return self.nested().preview_resources()
def _parse_child_template(self, child_template, child_env):
parsed_child_template = child_template
if isinstance(parsed_child_template, template.Template):
parsed_child_template = parsed_child_template.t
return template.Template(parsed_child_template,
files=self.stack.t.files, env=child_env)
def _parse_nested_stack(self, stack_name, child_template,
child_params, timeout_mins=None,
adopt_data=None):
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
stack_user_project_id = self.stack.stack_user_project_id
new_nested_depth = self._child_nested_depth()
child_env = environment.get_child_environment(
self.stack.env, child_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
parsed_template = self._child_parsed_template(child_template,
child_env)
# Note we disable rollback for nested stacks, since they
# should be rolled back by the parent stack on failure
nested = parser.Stack(self.context,
stack_name,
parsed_template,
timeout_mins=timeout_mins,
disable_rollback=True,
parent_resource=self.name,
owner_id=self.stack.id,
user_creds_id=self.stack.user_creds_id,
stack_user_project_id=stack_user_project_id,
adopt_stack_data=adopt_data,
nested_depth=new_nested_depth)
return nested
def _child_nested_depth(self):
if self.stack.nested_depth >= cfg.CONF.max_nested_stack_depth:
msg = _("Recursion depth exceeds %d."
) % cfg.CONF.max_nested_stack_depth
raise exception.RequestLimitExceeded(message=msg)
return self.stack.nested_depth + 1
def _child_parsed_template(self, child_template, child_env):
parsed_template = self._parse_child_template(child_template, child_env)
self._validate_nested_resources(parsed_template)
# Don't overwrite the attributes_schema for subclasses that
# define their own attributes_schema.
if not hasattr(type(self), 'attributes_schema'):
self.attributes = None
self._outputs_to_attribs(parsed_template)
return parsed_template
def _validate_nested_resources(self, templ):
total_resources = (len(templ[templ.RESOURCES]) +
self.stack.root_stack.total_resources())
if self.nested():
# It's an update and these resources will be deleted
total_resources -= len(self.nested().resources)
if (total_resources > cfg.CONF.max_resources_per_stack):
message = exception.StackResourceLimitExceeded.msg_fmt
raise exception.RequestLimitExceeded(message=message)
def create_with_template(self, child_template, user_params=None,
timeout_mins=None, adopt_data=None):
"""Create the nested stack with the given template."""
name = self.physical_resource_name()
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
stack_user_project_id = self.stack.stack_user_project_id
if user_params is None:
user_params = self.child_params()
child_env = environment.get_child_environment(
self.stack.env,
user_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
new_nested_depth = self._child_nested_depth()
parsed_template = self._child_parsed_template(child_template,
child_env)
adopt_data_str = None
if adopt_data is not None:
if 'environment' not in adopt_data:
adopt_data['environment'] = child_env.user_env_as_dict()
if 'template' not in adopt_data:
adopt_data['template'] = child_template
adopt_data_str = json.dumps(adopt_data)
args = {rpc_api.PARAM_TIMEOUT: timeout_mins,
rpc_api.PARAM_DISABLE_ROLLBACK: True,
rpc_api.PARAM_ADOPT_STACK_DATA: adopt_data_str}
try:
result = self.rpc_client()._create_stack(
self.context,
name,
parsed_template.t,
child_env.user_env_as_dict(),
parsed_template.files,
args,
owner_id=self.stack.id,
user_creds_id=self.stack.user_creds_id,
stack_user_project_id=stack_user_project_id,
nested_depth=new_nested_depth,
parent_resource_name=self.name)
except Exception as ex:
self.raise_local_exception(ex)
self.resource_id_set(result['stack_id'])
def raise_local_exception(self, ex):
ex_type = ex.__class__.__name__
is_remote = ex_type.endswith('_Remote')
if is_remote:
ex_type = ex_type[:-len('_Remote')]
full_message = six.text_type(ex)
if full_message.find('\n') > -1 and is_remote:
message, msg_trace = full_message.split('\n', 1)
else:
message = full_message
if (isinstance(ex, exception.ActionInProgress) and
self.stack.action == self.stack.ROLLBACK):
# The update was interrupted and the rollback is already in
# progress, so just ignore the error and wait for the rollback to
# finish
return
if isinstance(ex, exception.HeatException):
message = ex.message
local_ex = copy.copy(getattr(exception, ex_type))
local_ex.msg_fmt = "%(message)s"
raise local_ex(message=message)
def check_create_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.CREATE)
def _check_status_complete(self, action, show_deleted=False,
cookie=None):
try:
nested = self.nested(force_reload=True, show_deleted=show_deleted)
except exception.NotFound:
if action == resource.Resource.DELETE:
return True
# It's possible the engine handling the create hasn't persisted
# the stack to the DB when we first start polling for state
return False
if nested is None:
return True
if nested.action != action:
return False
# Has the action really started?
#
# The rpc call to update does not guarantee that the stack will be
# placed into IN_PROGRESS by the time it returns (it runs stack.update
# in a thread) so you could also have a situation where we get into
# this method and the update hasn't even started.
#
# So we are using a mixture of state (action+status) and updated_at
# to see if the action has actually progressed.
# - very fast updates (like something with one RandomString) we will
# probably miss the state change, but we should catch the updated_at.
# - very slow updates we won't see the updated_at for quite a while,
# but should see the state change.
if cookie is not None:
prev_state = cookie['previous']['state']
prev_updated_at = cookie['previous']['updated_at']
if (prev_updated_at == nested.updated_time and
prev_state == nested.state):
return False
if nested.status == resource.Resource.IN_PROGRESS:
return False
elif nested.status == resource.Resource.COMPLETE:
return True
elif nested.status == resource.Resource.FAILED:
raise resource.ResourceUnknownStatus(
resource_status=nested.status,
status_reason=nested.status_reason)
else:
raise resource.ResourceUnknownStatus(
resource_status=nested.status,
result=_('Stack unknown status'))
def check_adopt_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.ADOPT)
def update_with_template(self, child_template, user_params=None,
timeout_mins=None):
"""Update the nested stack with the new template."""
if self.id is None:
self._store()
nested_stack = self.nested()
if nested_stack is None:
# if the create failed for some reason and the nested
# stack was not created, we need to create an empty stack
# here so that the update will work.
def _check_for_completion(creator_fn):
while not self.check_create_complete(creator_fn):
yield
empty_temp = template_format.parse(
"heat_template_version: '2013-05-23'")
stack_creator = self.create_with_template(empty_temp, {})
checker = scheduler.TaskRunner(_check_for_completion,
stack_creator)
checker(timeout=self.stack.timeout_secs())
if stack_creator is not None:
stack_creator.run_to_completion()
nested_stack = self.nested()
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
if user_params is None:
user_params = self.child_params()
child_env = environment.get_child_environment(
self.stack.env,
user_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
parsed_template = self._child_parsed_template(child_template,
child_env)
cookie = {'previous': {
'updated_at': nested_stack.updated_time,
'state': nested_stack.state}}
args = {rpc_api.PARAM_TIMEOUT: timeout_mins}
try:
self.rpc_client().update_stack(
self.context,
nested_stack.identifier(),
parsed_template.t,
child_env.user_env_as_dict(),
parsed_template.files,
args)
except Exception as ex:
LOG.exception('update_stack')
self.raise_local_exception(ex)
return cookie
def check_update_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.UPDATE,
cookie=cookie)
def delete_nested(self):
'''
Delete the nested stack.
'''
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
try:
self.rpc_client().delete_stack(self.context, stack_identity)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
def check_delete_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.DELETE,
show_deleted=True)
def handle_suspend(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot suspend %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_suspend(self.context, stack_identity)
def check_suspend_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.SUSPEND)
def handle_resume(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot resume %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_resume(self.context, stack_identity)
def check_resume_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.RESUME)
def handle_check(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot check %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_check(self.context, stack_identity)
def check_check_complete(self, cookie=None):
return self._check_status_complete(resource.Resource.CHECK)
def prepare_abandon(self):
return self.nested().prepare_abandon()
def get_output(self, op):
'''
Return the specified Output value from the nested stack.
If the output key does not exist, raise an InvalidTemplateAttribute
exception.
'''
stack = self.nested()
if stack is None:
return None
if op not in stack.outputs:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=op)
return stack.output(op)
def _resolve_attribute(self, name):
return self.get_output(name)
def implementation_signature(self):
schema_names = ([prop for prop in self.properties_schema] +
[at for at in self.attributes_schema])
schema_hash = hashlib.sha256(';'.join(schema_names))
definition = {'template': self.child_template(),
'files': self.stack.t.files}
definition_hash = hashlib.sha256(jsonutils.dumps(definition))
return (schema_hash.hexdigest(), definition_hash.hexdigest())
| apache-2.0 | -4,581,617,663,593,579,000 | 38.932692 | 79 | 0.584589 | false |
hds-lab/coding-ml | msgvis/apps/enhance/migrations/0002_auto_20160222_0230.py | 1 | 1306 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('enhance', '0001_initial'),
(b'auth', b'__first__'), # This line and the next line is for fixing the "Lookup failed auth.User" error
(b'contenttypes', b'__first__'),
]
operations = [
migrations.AddField(
model_name='feature',
name='created_at',
field=models.DateTimeField(default=None, auto_now_add=True),
preserve_default=True,
),
migrations.AddField(
model_name='feature',
name='last_updated',
field=models.DateTimeField(default=None, auto_now=True, auto_now_add=True),
preserve_default=True,
),
migrations.AddField(
model_name='feature',
name='valid',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AlterField(
model_name='feature',
name='source',
field=models.ForeignKey(related_name='features', default=None, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| mit | -7,770,648,743,024,572,000 | 30.853659 | 119 | 0.570444 | false |
jj1bdx/wspr | WsprMod/iq.py | 1 | 4726 | #------------------------------------------------------------------ iq
from Tkinter import *
import Pmw
import g
import w
import time
import tkMessageBox
import pickle
def done():
root.withdraw()
root=Toplevel()
root.withdraw()
root.protocol('WM_DELETE_WINDOW',done)
if g.Win32: root.iconbitmap("wsjt.ico")
root.title("I-Q Mode")
def iq2(t):
root.geometry(t)
root.deiconify()
root.focus_set()
j=ib.get()
lab0.configure(text=str(mb[j])+' m')
iqmode=IntVar()
iqrx=IntVar()
iqtx=IntVar()
fiq=IntVar()
iqrxapp=IntVar()
iqrxadj=IntVar()
isc2=IntVar()
isc2.set(0)
isc2a=IntVar()
isc2a.set(0)
isc3=IntVar()
isc3.set(0)
isc3a=IntVar()
isc3a.set(0)
ib=IntVar()
gain=DoubleVar()
phdeg=DoubleVar()
mb=[0,600,160,80,60,40,30,20,17,15,12,10,6,4,2,0]
tbal=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
tpha=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
rbal=[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]
rpha=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
allbands=0
def saveband(event=NONE):
global allbands,tbal,tpha,rbal,rpha
if allbands:
for j in range(1,15):
tbal[j]=isc2.get() + 0.02*isc2a.get()
tpha[j]=isc3.get() + 0.02*isc3a.get()
rbal[j]=w.acom1.gain
rpha[j]=57.2957795*w.acom1.phase
else:
j=ib.get()
tbal[j]=isc2.get() + 0.02*isc2a.get()
tpha[j]=isc3.get() + 0.02*isc3a.get()
rbal[j]=w.acom1.gain
rpha[j]=57.2957795*w.acom1.phase
f=open(g.appdir+'/iqpickle',mode='w')
pickle.dump(tbal,f)
pickle.dump(tpha,f)
pickle.dump(rbal,f)
pickle.dump(rpha,f)
f.close()
def saveall(event=NONE):
global allbands
allbands=1
saveband()
allbands=0
def restore():
global tbal,tpha,rbal,rpha
try:
f=open(g.appdir+'/iqpickle',mode='r')
tbal=pickle.load(f)
tpha=pickle.load(f)
rbal=pickle.load(f)
rpha=pickle.load(f)
f.close()
except:
pass
newband()
def newband():
j=ib.get()
lab0.configure(text=str(mb[j])+' m')
w.acom1.gain=rbal[j]
w.acom1.phase=rpha[j]/57.2957795
isc2.set(int(tbal[j]))
isc2a.set(int((tbal[j]-isc2.get())/0.02))
isc3.set(int(tpha[j]))
isc3a.set(int((tpha[j]-isc3.get())/0.02))
#-------------------------------------------------------- Create GUI widgets
g1=Pmw.Group(root,tag_pyclass=None)
lab0=Label(g1.interior(),text='160 m',bg='yellow',pady=5)
lab0.place(x=180,y=40, anchor='e')
#lab0.pack(anchor=W,padx=5,pady=4)
biqmode=Checkbutton(g1.interior(),text='Enable I/Q mode',variable=iqmode)
biqmode.pack(anchor=W,padx=5,pady=2)
biqtx=Checkbutton(g1.interior(),text='Reverse Tx I,Q',variable=iqtx)
biqtx.pack(anchor=W,padx=5,pady=2)
biqrx=Checkbutton(g1.interior(),text='Reverse Rx I,Q',variable=iqrx)
biqrx.pack(anchor=W,padx=5,pady=2)
biqrxapp=Checkbutton(g1.interior(),text='Apply Rx phasing corrections', \
variable=iqrxapp)
biqrxapp.pack(anchor=W,padx=5,pady=2)
biqrxadj=Checkbutton(g1.interior(),text='Adjust Rx phasing', \
variable=iqrxadj)
biqrxadj.pack(anchor=W,padx=5,pady=2)
lab1=Label(g1.interior(),text='',justify=LEFT)
lab1.pack(anchor=W,padx=5,pady=4)
fiq_entry=Pmw.EntryField(g1.interior(),labelpos=W,label_text='Fiq (Hz): ',
value='12000',entry_textvariable=fiq,entry_width=10,
validate={'validator':'integer','min':-24000,'max':24000,
'minstrict':0,'maxstrict':0})
fiq_entry.pack(fill=X,padx=2,pady=4)
sc2=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-30, \
to=30,variable=isc2,label='Tx I/Q Balance (0.1 dB)', \
relief=SOLID,bg='#EEDD82')
sc2.pack(side=TOP,padx=4,pady=2)
sc2a=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-50, \
to=50,variable=isc2a,label='Tx I/Q Balance (0.002 dB)', \
relief=SOLID,bg='#EEDD82')
sc2a.pack(side=TOP,padx=4,pady=2)
sc3=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-20, \
to=20,variable=isc3,label='Tx Phase (deg)', \
relief=SOLID,bg='#AFeeee')
sc3.pack(side=TOP,padx=4,pady=2)
sc3a=Scale(g1.interior(),orient=HORIZONTAL,length=200,from_=-50, \
to=50,variable=isc3a,label='Tx Phase (0.02 deg)', \
relief=SOLID,bg='#AFeeee')
sc3a.pack(side=TOP,padx=4,pady=2)
bsave=Button(g1.interior(), text='Save for this band',command=saveband,
width=32,padx=1,pady=2)
bsave.pack(padx=2,pady=4)
bsaveall=Button(g1.interior(), text='Save for all bands',command=saveall,
width=32,padx=1,pady=2)
bsaveall.pack(padx=2,pady=4)
f1=Frame(g1.interior(),width=100,height=1)
f1.pack()
g1.pack(side=LEFT,fill=BOTH,expand=1,padx=4,pady=4)
| gpl-2.0 | -423,385,164,766,437,600 | 27.46988 | 82 | 0.621244 | false |
zestyr/lbry | lbrynet/core/server/ServerRequestHandler.py | 1 | 5866 | import json
import logging
from twisted.internet import interfaces, defer
from zope.interface import implements
from lbrynet.interfaces import IRequestHandler
log = logging.getLogger(__name__)
class ServerRequestHandler(object):
"""This class handles requests from clients. It can upload blobs and
return request for information about more blobs that are
associated with streams.
"""
implements(interfaces.IPushProducer, interfaces.IConsumer, IRequestHandler)
def __init__(self, consumer):
self.consumer = consumer
self.production_paused = False
self.request_buff = ''
self.response_buff = ''
self.producer = None
self.request_received = False
self.CHUNK_SIZE = 2**14
self.query_handlers = {} # {IQueryHandler: [query_identifiers]}
self.blob_sender = None
self.consumer.registerProducer(self, True)
#IPushProducer stuff
def pauseProducing(self):
self.production_paused = True
def stopProducing(self):
if self.producer is not None:
self.producer.stopProducing()
self.producer = None
self.production_paused = True
self.consumer.unregisterProducer()
def resumeProducing(self):
from twisted.internet import reactor
self.production_paused = False
self._produce_more()
if self.producer is not None:
reactor.callLater(0, self.producer.resumeProducing)
def _produce_more(self):
from twisted.internet import reactor
if self.production_paused:
return
chunk = self.response_buff[:self.CHUNK_SIZE]
self.response_buff = self.response_buff[self.CHUNK_SIZE:]
if chunk == '':
return
log.trace("writing %s bytes to the client", len(chunk))
self.consumer.write(chunk)
reactor.callLater(0, self._produce_more)
#IConsumer stuff
def registerProducer(self, producer, streaming):
self.producer = producer
assert streaming is False
producer.resumeProducing()
def unregisterProducer(self):
self.producer = None
def write(self, data):
from twisted.internet import reactor
self.response_buff = self.response_buff + data
self._produce_more()
def get_more_data():
if self.producer is not None:
log.trace("Requesting more data from the producer")
self.producer.resumeProducing()
reactor.callLater(0, get_more_data)
#From Protocol
def data_received(self, data):
log.debug("Received data")
log.debug("%s", str(data))
if self.request_received is False:
return self._parse_data_and_maybe_send_blob(data)
else:
log.warning(
"The client sent data when we were uploading a file. This should not happen")
def _parse_data_and_maybe_send_blob(self, data):
self.request_buff = self.request_buff + data
msg = self.try_to_parse_request(self.request_buff)
if msg:
self.request_buff = ''
self._process_msg(msg)
else:
log.debug("Request buff not a valid json message")
log.debug("Request buff: %s", self.request_buff)
def _process_msg(self, msg):
d = self.handle_request(msg)
if self.blob_sender:
d.addCallback(lambda _: self.blob_sender.send_blob_if_requested(self))
d.addCallbacks(lambda _: self.finished_response(), self.request_failure_handler)
######### IRequestHandler #########
def register_query_handler(self, query_handler, query_identifiers):
self.query_handlers[query_handler] = query_identifiers
def register_blob_sender(self, blob_sender):
self.blob_sender = blob_sender
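    # Hedged sketch (illustrative only): a protocol would typically wire this
    # handler up roughly as follows; ExampleQueryHandler is an assumed
    # IQueryHandler implementation, not part of this module.
    #
    #     handler = ServerRequestHandler(consumer)
    #     handler.register_query_handler(ExampleQueryHandler(),
    #                                    ['blob_data_payment_rate'])
    #     handler.register_blob_sender(blob_sender)
    #     handler.data_received('{"blob_data_payment_rate": 0.5}')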
#response handling
def request_failure_handler(self, err):
log.warning("An error occurred handling a request. Error: %s", err.getErrorMessage())
self.stopProducing()
return err
def finished_response(self):
self.request_received = False
self._produce_more()
def send_response(self, msg):
m = json.dumps(msg)
log.debug("Sending a response of length %s", str(len(m)))
log.debug("Response: %s", str(m))
self.response_buff = self.response_buff + m
self._produce_more()
return True
def handle_request(self, msg):
log.debug("Handling a request")
log.debug(str(msg))
def create_response_message(results):
response = {}
for success, result in results:
if success is True:
response.update(result)
else:
# result is a Failure
return result
log.debug("Finished making the response message. Response: %s", str(response))
return response
def log_errors(err):
log.warning(
"An error occurred handling a client request. Error message: %s",
err.getErrorMessage())
return err
def send_response(response):
self.send_response(response)
return True
ds = []
for query_handler, query_identifiers in self.query_handlers.iteritems():
queries = {q_i: msg[q_i] for q_i in query_identifiers if q_i in msg}
d = query_handler.handle_queries(queries)
d.addErrback(log_errors)
ds.append(d)
dl = defer.DeferredList(ds)
dl.addCallback(create_response_message)
dl.addCallback(send_response)
return dl
def try_to_parse_request(self, request_buff):
try:
msg = json.loads(request_buff)
return msg
except ValueError:
return None
| mit | 8,375,703,638,704,539,000 | 30.537634 | 93 | 0.608421 | false |
diegodelemos/reana-job-controller | tests/test_job_manager.py | 1 | 4870 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2019 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Job-Controller Job Manager tests."""
import json
import os
import uuid
import mock
import pytest
from reana_db.models import Job, JobStatus
from reana_job_controller.job_manager import JobManager
from reana_job_controller.kubernetes_job_manager import KubernetesJobManager
def test_execute_kubernetes_job(
app,
session,
sample_serial_workflow_in_db,
sample_workflow_workspace,
default_user,
empty_user_secrets,
corev1_api_client_with_user_secrets,
monkeypatch,
):
"""Test execution of Kubernetes job."""
workflow_uuid = sample_serial_workflow_in_db.id_
workflow_workspace = next(sample_workflow_workspace(str(workflow_uuid)))
env_var_key = "key"
env_var_value = "value"
expected_env_var = {env_var_key: env_var_value}
expected_image = "busybox"
expected_command = ["ls"]
monkeypatch.setenv("REANA_USER_ID", str(default_user.id_))
job_manager = KubernetesJobManager(
docker_img=expected_image,
cmd=expected_command,
env_vars=expected_env_var,
workflow_uuid=workflow_uuid,
workflow_workspace=workflow_workspace,
)
with mock.patch(
"reana_job_controller.kubernetes_job_manager." "current_k8s_batchv1_api_client"
) as kubernetes_client:
with mock.patch(
"reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
corev1_api_client_with_user_secrets(empty_user_secrets),
):
kubernetes_job_id = job_manager.execute()
created_job = (
session.query(Job)
.filter_by(backend_job_id=kubernetes_job_id)
.one_or_none()
)
assert created_job
assert created_job.docker_img == expected_image
assert created_job.cmd == json.dumps(expected_command)
assert json.dumps(expected_env_var) in created_job.env_vars
assert created_job.status == JobStatus.created
kubernetes_client.create_namespaced_job.assert_called_once()
body = kubernetes_client.create_namespaced_job.call_args[1]["body"]
env_vars = body["spec"]["template"]["spec"]["containers"][0]["env"]
image = body["spec"]["template"]["spec"]["containers"][0]["image"]
command = body["spec"]["template"]["spec"]["containers"][0]["command"]
assert len(env_vars) == 3
assert {"name": env_var_key, "value": env_var_value} in env_vars
assert image == expected_image
assert command == expected_command
def test_stop_kubernetes_job(
app,
session,
sample_serial_workflow_in_db,
sample_workflow_workspace,
empty_user_secrets,
default_user,
corev1_api_client_with_user_secrets,
monkeypatch,
):
"""Test stop of Kubernetes job."""
workflow_uuid = sample_serial_workflow_in_db.id_
workflow_workspace = next(sample_workflow_workspace(str(workflow_uuid)))
expected_env_var_name = "env_var"
expected_env_var_value = "value"
expected_image = "busybox"
expected_command = ["ls"]
monkeypatch.setenv("REANA_USER_ID", str(default_user.id_))
job_manager = KubernetesJobManager(
docker_img=expected_image,
cmd=expected_command,
env_vars={expected_env_var_name: expected_env_var_value},
workflow_uuid=workflow_uuid,
workflow_workspace=workflow_workspace,
)
with mock.patch(
"reana_job_controller.kubernetes_job_manager." "current_k8s_batchv1_api_client"
) as kubernetes_client:
with mock.patch(
"reana_commons.k8s.secrets." "current_k8s_corev1_api_client",
corev1_api_client_with_user_secrets(empty_user_secrets),
):
kubernetes_job_id = job_manager.execute()
kubernetes_client.create_namespaced_job.assert_called_once()
job_manager.stop(kubernetes_job_id)
kubernetes_client.delete_namespaced_job.assert_called_once()
def test_execution_hooks():
"""Test hook execution order."""
class TestJobManger(JobManager):
@JobManager.execution_hook
def execute(self):
self.order_list.append(2)
job_id = str(uuid.uuid4())
return job_id
def before_execution(self):
self.order_list = []
self.order_list.append(1)
def create_job_in_db(self, job_id):
self.order_list.append(3)
def cache_job(self):
self.order_list.append(4)
job_manager = TestJobManger("busybox", "ls", {})
job_manager.execute()
assert job_manager.order_list == [1, 2, 3, 4]
| mit | -5,248,791,011,333,741,000 | 34.289855 | 87 | 0.634702 | false |
kencochrane/docker-django-demo | dockerdemo/voting/migrations/0001_initial.py | 1 | 1782 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-16 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='date published')),
('last_update', models.DateTimeField(auto_now=True, verbose_name='last updated')),
],
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote_date', models.DateTimeField(auto_now=True, verbose_name='date voted')),
('ip_address', models.GenericIPAddressField()),
('selection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='voting.Choice')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='voting.Question'),
),
]
| mit | -4,280,010,848,897,716,000 | 36.914894 | 114 | 0.574635 | false |
todddeluca/diabric | diabric/files.py | 1 | 11396 |
'''
Fabric utilities for working with files.
'''
import StringIO
import contextlib
import os
import shutil
import subprocess
import uuid
from fabric.api import sudo, run, settings, hide, put, local, abort
from fabric.contrib.files import exists
##################
# HELPER FUNCTIONS
# These functions are reusable snippets meant to improve the consistency
# and modularity of files.py code
def set_mode(path, mode, remote=True, use_sudo=False):
'''
To improve code consistency and composition, this function
changes the mode of `path` to `mode`.
path: the path to the file or directory whose mode is being set.
remote: indicates that filename is a located on a remote host and `run`
or `sudo` should be used to set the mode.
use_sudo: only applies when remote is True. Use `sudo` instead of `run`.
'''
func = local if not remote else sudo if use_sudo else run
func('chmod {} {}'.format(oct(mode), path))
def backup_file(filename, remote=True, use_sudo=False, extension='.bak'):
'''
filename: path to a local or remote file
If filename exists, copy filename to filename.bak
'''
func = local if not remote else sudo if use_sudo else run
if exists(filename):
func("cp %s %s.bak" % (filename, filename))
def normalize_dest(src, dest, remote=True, use_sudo=False):
'''
src: a file path
dest: a file or directory path
If dest is an existing directory, this returns a path to the basename of src within the directory dest.
    Otherwise, dest is returned unchanged.
This is useful for getting an actual filename when destination can be
a file or a directory.
'''
func = local if not remote else sudo if use_sudo else run
# Normalize dest to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % dest).succeeded:
dest = os.path.join(dest, os.path.basename(src))
return dest
################
# FILE FUNCTIONS
def file_template(filename, destination, context=None, use_jinja=False,
template_dir=None, backup=True, mirror_local_mode=False, mode=None):
"""
This is the local version of upload_template.
Render and copy a template text file to a local destination.
``filename`` should be the path to a text file, which may contain `Python
string interpolation formatting
<http://docs.python.org/release/2.5.4/lib/typesseq-strings.html>`_ and will
be rendered with the given context dictionary ``context`` (if given.)
Alternately, if ``use_jinja`` is set to True and you have the Jinja2
templating library available, Jinja will be used to render the template
instead. Templates will be loaded from the invoking user's current working
directory by default, or from ``template_dir`` if given.
The resulting rendered file will be written to the local file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
The ``mirror_local_mode`` and ``mode`` kwargs are used in a similar
manner as in `~fabric.operations.put`; please see its documentation for
details on these two options.
"""
func = local
# make sure destination is a file name, not a directory name.
destination = normalize_dest(filename, destination, remote=False)
# grab mode before writing destination, in case filename and destination
# are the same.
if mirror_local_mode and mode is None:
# mode is numeric. See os.chmod or os.stat.
        mode = os.stat(filename).st_mode
# Process template
text = None
if use_jinja:
try:
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir or '.'))
text = jenv.get_template(filename).render(**context or {})
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + "\nUnable to import Jinja2 -- see above.")
else:
with open(filename) as inputfile:
text = inputfile.read()
if context:
text = text % context
if backup:
backup_file(destination, remote=False)
# write the processed text
with open(destination, 'w') as fh:
fh.write(text)
if mode:
set_mode(destination, mode, remote=False)
def fix_shebang(shebang, handle):
'''
shebang: a shebang line, e.g. #!/usr/bin/env python or #!/bin/sh. If
shebang does not start with '#!', then '#!' will be prepended to it. If
shebang does not end with a newline, a newline will be appended.
    handle: an iterable of lines, presumably the contents of a file that needs a
shebang line or a new shebang line.
Yield shebang and then the lines in handle except the first line in handle
if it is a shebang line.
'''
# make sure shebang is starts with '#!' and ends with a newline.
if not shebang.startswith('#!'):
shebang = '#!' + shebang
if not shebang.endswith('\n'):
shebang += '\n'
for i, line in enumerate(handle):
if i == 0:
yield shebang
if not line.startswith('#!'):
yield line
else:
yield line
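# Hedged usage sketch (illustrative, not part of the original module);
# 'script.py' is an assumed local file:
#
#     with open('script.py') as fh:
#         fixed = ''.join(fix_shebang('/usr/bin/env python', fh))
#
# 'fixed' is the file's contents with '#!/usr/bin/env python\n' as its
# first line, replacing any existing shebang.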
def upload_shebang(filename, destination, shebang, use_sudo=False, backup=True,
mirror_local_mode=False, mode=None):
"""
Upload a text file to a remote host, adding or updating the shebang line.
``filename`` should be the path to a text file.
``shebang`` should be a string containing a shebang line. E.g.
"#!/usr/bin/python\n". If shebang does not start with '#!' or end with a
newline, these will be added.
If the first line in filename starts with '#!' it will be replaced with
shebang. If the first line does not start with #!, shebang will be
prepended to the contents of filename.
The resulting file will be uploaded to the remote file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
By default, the file will be copied to ``destination`` as the logged-in
user; specify ``use_sudo=True`` to use `sudo` instead.
The ``mirror_local_mode`` and ``mode`` kwargs are passed directly to an
internal `~fabric.operations.put` call; please see its documentation for
details on these two options.
"""
func = use_sudo and sudo or run
# Normalize destination to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % destination).succeeded:
sep = "" if destination.endswith('/') else "/"
destination += sep + os.path.basename(filename)
# Use mode kwarg to implement mirror_local_mode, again due to using
# StringIO
if mirror_local_mode and mode is None:
mode = os.stat(filename).st_mode
# To prevent put() from trying to do this
# logic itself
mirror_local_mode = False
# process filename
text = None
with open(filename) as inputfile:
text = ''.join(fix_shebang(shebang, inputfile))
# Back up original file
if backup and exists(destination):
func("cp %s{,.bak}" % destination)
# Upload the file.
put(
local_path=StringIO.StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mirror_local_mode=mirror_local_mode,
mode=mode
)
def upload_format(filename, destination, args=None, kws=None,
use_sudo=False, backup=True, mirror_local_mode=False,
mode=None):
"""
Read in the contents of filename, format the contents via
contents.format(*args, **kws), and upload the results to the
destination on the remote host.
``filename`` should be the path to a text file. The contents of
``filename`` will be read in.
Format the contents, using contents.format(*args, **kws). If
args is None, it will not be included in the format() call.
Likewise for kws.
The resulting contents will be uploaded to the remote file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
By default, the file will be copied to ``destination`` as the logged-in
user; specify ``use_sudo=True`` to use `sudo` instead.
The ``mirror_local_mode`` and ``mode`` kwargs are passed directly to an
internal `~fabric.operations.put` call; please see its documentation for
details on these two options.
"""
func = use_sudo and sudo or run
# Normalize destination to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % destination).succeeded:
sep = "" if destination.endswith('/') else "/"
destination += sep + os.path.basename(filename)
# Use mode kwarg to implement mirror_local_mode, again due to using
# StringIO
if mirror_local_mode and mode is None:
mode = os.stat(filename).st_mode
# To prevent put() from trying to do this
# logic itself
mirror_local_mode = False
# process filename
text = None
with open(filename) as inputfile:
if not args:
args = []
if not kws:
kws = {}
text = inputfile.read().format(*args, **kws)
# Back up original file
if backup and exists(destination):
func("cp %s{,.bak}" % destination)
# Upload the file.
put(
local_path=StringIO.StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mirror_local_mode=mirror_local_mode,
mode=mode
)
def file_format(infile, outfile, args=None, kws=None):
'''
Consider using fabric.contrib.files.upload_template or upload_format
infile: a local file path
outfile: a local file path.
Read the contents of infile as a string, ''.format() the string using args
and kws, and write the formatted string to outfile. This is useful if
infile is a "template" and args and kws contain the concrete values
for the template.
'''
if args is None:
args = []
if kws is None:
        kws = {}
with open(infile) as fh:
text = fh.read()
new_text = text.format(*args, **kws)
with open(outfile, 'w') as fh2:
fh2.write(new_text)
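# Hedged usage sketch (illustrative; the file names and keys are assumptions):
#
#     file_format('nginx.conf.template', 'nginx.conf',
#                 kws={'server_name': 'example.org', 'port': 8080})
#
# reads nginx.conf.template, fills the {server_name} and {port} placeholders,
# and writes the result to nginx.conf.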
def rsync(options, src, dest, user=None, host=None, cwd=None):
'''
Consider using fabric.contrib.project.rsync_project.
options: list of rsync options, e.g. ['--delete', '-avz']
src: source directory (or files). Note: rsync behavior varies depending on whether or not src dir ends in '/'.
dest: destination directory.
cwd: change (using subprocess) to cwd before running rsync.
This is a helper function for running rsync locally, via subprocess. Note: shell=False.
'''
# if remote user and host specified, copy there instead of locally.
if user and host:
destStr = '{}@{}:{}'.format(user, host, dest)
else:
destStr = dest
args = ['rsync'] + options + [src, destStr]
print args
subprocess.check_call(args, cwd=cwd)
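# Hedged usage sketch (illustrative; user, host and paths are assumptions):
#
#     rsync(['-avz', '--delete'], 'build/', '/var/www/myapp',
#           user='deploy', host='example.com')
#
# runs: rsync -avz --delete build/ deploy@example.com:/var/www/myapp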
| mit | 3,146,706,489,054,374,400 | 33.850153 | 115 | 0.650053 | false |
NGnius/ModScraper | ModScraper.py | 1 | 4720 | '''This actually runs everything'''
import sys, os, time
sys.path.append(os.getcwd()+'/Resources')
import ForumsToCheck, scraper, config, log_it
import necroDetector, profanityDetector, capsDetector, annoyanceAlerts
from multiprocessing import Process
def init(queue):
global q
q = queue
def main():
'''() -> None
    main function, goes through all the stuff'''
startTime = time.time() #for science!
log_it.logg("Starting scraping round", q, genus=["debug"])
scrapeForums()
elapsedTime = time.time()-startTime #for science!
log_it.logg("Scraping round completed successfully", q, genus=["debug"])
log_it.logg ("Time elapsed in latest scrape: " + str(elapsedTime), q, genus=["benchmarking"])
if elapsedTime < config.retrieveConfig("Period"): #if the period specified in config hasn't elapsed completely
log_it.logg("Sleeping",q, genus=["debug"]) #debug
time.sleep(config.retrieveConfig("Period")-elapsedTime) #sleep until the period time has elapsed
def scrapePost(p, q, check):
''' (url : str, Queue object, str)
    scrape the post and run only the detectors whose configured mode matches check'''
startTime = time.time()
post = scraper.pageClass()
log_it.logg("Scraping " + p, q, genus=["debug", "verbose"])
post.ret(p)
if config.retrieveConfig("NecroDetection")==check:
if necroDetector.isNecro(post, config.retrieveConfig("NecroTimeDelta")):
log_it.logg(p + " has been bumped from the dead.", q, genus=["output"])
if config.retrieveConfig("ProfanityDetection")==check:
if profanityDetector.isProfanity(post, config.retrieveConfig("SwearThreshold")):
log_it.logg(p + " has been sworn in a lot.", q, genus=["output"])
if config.retrieveConfig("CapsTitleDetection")==check:
if capsDetector.isCapsTitle(post, config.retrieveConfig("CapsTitleThreshold")):
log_it.logg(p + " has a lot of caps in the title.", q, genus=["output"])
if config.retrieveConfig("CrusherAlerts")==check:
if annoyanceAlerts.isCrusher(post):
log_it.logg("Crusher Alert at " + p)
if config.retrieveConfig("RotatingPlatformAlerts")==check:
if annoyanceAlerts.isRotatingPlatforms(post):
log_it.logg("Rotating Platforms Alert at " + p)
elapsedTime = time.time()-startTime #for science!
log_it.logg("Finished scraping " + p + " in " + str(elapsedTime), q, genus=["verbose", "benchmarking"])
def scrapeSection(section, q):
'''(url : str, Queue object)
    scrape a forum section by creating a worker process for every post that needs to be scraped'''
startTime = time.time()
page = scraper.pageClass()
threads = []
page.ret(section)
check = "all"
log_it.logg("Creating post scraping threads for " + section, q, genus=["debug"])
if config.retrieveConfig("NecroDetection") == check or config.retrieveConfig("ProfanityDetection") == check or config.retrieveConfig("CapsTitleDetection") == check: #don't run if nothing wants it to
sectionPosts = page.findAllPosts()
log_it.logg("Found " + str(len(sectionPosts)) + " threads in section " + section, q, genus=["debug", "verbose"])
for postNum in range(len(sectionPosts)):
threads.append(Process(target=scrapePost, args=(sectionPosts[postNum], q, check,)))
threads[postNum].start()
check = "first"
if config.retrieveConfig("NecroDetection") == check or config.retrieveConfig("ProfanityDetection") == check or config.retrieveConfig("CapsTitleDetection") == check: #don't run if nothing wants it to
sectionFirstPost = page.findFirstPost()
threads.append(Process(target=scrapePost, args=(sectionFirstPost, q, check,)))
        threads[-1].start()
log_it.logg("Finished creating post scraping threads for " + section, q, genus=["debug"])
for thread in threads: #wait for all created threads to finish
thread.join()
elapsedTime = time.time()-startTime #for science!
log_it.logg("Time elapsed in scraping section " + section + " : " + str(elapsedTime), q, genus=["verbose", "benchmarking"])
def scrapeForums():
'''() -> None
starts the forum section threads'''
threads=[]
log_it.logg("Creating section scraping threads", q, genus=["debug"])
log_it.logg("There are " + str(len(ForumsToCheck.forums)) + " forum sections to scrape", q, genus=["debug", "verbose"])
for n in range(len(ForumsToCheck.forums)):
threads.append(Process(target=scrapeSection, args=(ForumsToCheck.forums[n],q,)))
threads[n].start()
log_it.logg("Finished creating section scraping threads", q, genus=["debug"])
for thread in threads: #wait for all created threads to finish
thread.join()
| gpl-3.0 | -6,662,993,216,560,327,000 | 51.444444 | 202 | 0.675 | false |
zeroSteiner/boltons | boltons/formatutils.py | 1 | 11298 | # -*- coding: utf-8 -*-
"""`PEP 3101`_ introduced the :meth:`str.format` method, and what
would later be called "new-style" string formatting. For the sake of
explicit correctness, it is probably best to refer to Python's dual
string formatting capabilities as *bracket-style* and
*percent-style*. There is overlap, but one does not replace the
other.
* Bracket-style is more pluggable, slower, and uses a method.
* Percent-style is simpler, faster, and uses an operator.
Bracket-style formatting brought with it a much more powerful toolbox,
but it was far from a full one. :meth:`str.format` uses `more powerful
syntax`_, but `the tools and idioms`_ for working with
that syntax are not well-developed nor well-advertised.
``formatutils`` adds several functions for working with bracket-style
format strings:
* :class:`DeferredValue`: Defer fetching or calculating a value
until format time.
* :func:`get_format_args`: Parse the positional and keyword
arguments out of a format string.
* :func:`tokenize_format_str`: Tokenize a format string into
literals and :class:`BaseFormatField` objects.
* :func:`construct_format_field_str`: Assists in programmatic
construction of format strings.
* :func:`infer_positional_format_args`: Converts anonymous
references in 2.7+ format strings to explicit positional arguments
suitable for usage with Python 2.6.
.. _more powerful syntax: https://docs.python.org/2/library/string.html#format-string-syntax
.. _the tools and idioms: https://docs.python.org/2/library/string.html#string-formatting
.. _PEP 3101: https://www.python.org/dev/peps/pep-3101/
"""
# TODO: also include percent-formatting utils?
# TODO: include lithoxyl.formatters.Formatter (or some adaptation)?
from __future__ import print_function
import re
from string import Formatter
__all__ = ['DeferredValue', 'get_format_args', 'tokenize_format_str',
'construct_format_field_str', 'infer_positional_format_args',
'BaseFormatField']
_pos_farg_re = re.compile('({{)|' # escaped open-brace
'(}})|' # escaped close-brace
'({[:!.\[}])') # anon positional format arg
def construct_format_field_str(fname, fspec, conv):
"""
Constructs a format field string from the field name, spec, and
conversion character (``fname``, ``fspec``, ``conv``). See Python
String Formatting for more info.
"""
if fname is None:
return ''
ret = '{' + fname
if conv:
ret += '!' + conv
if fspec:
ret += ':' + fspec
ret += '}'
return ret
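# Hedged examples (illustrative):
#
#     construct_format_field_str('pos', '>10', 's')   # -> '{pos!s:>10}'
#     construct_format_field_str('name', '', None)    # -> '{name}'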
def split_format_str(fstr):
"""Does very basic spliting of a format string, returns a list of
strings. For full tokenization, see :func:`tokenize_format_str`.
"""
ret = []
for lit, fname, fspec, conv in Formatter().parse(fstr):
if fname is None:
ret.append((lit, None))
continue
field_str = construct_format_field_str(fname, fspec, conv)
ret.append((lit, field_str))
return ret
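# Hedged example (illustrative):
#
#     split_format_str('{greeting}, world!')
#     # -> [('', '{greeting}'), (', world!', None)]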
def infer_positional_format_args(fstr):
"""Takes format strings with anonymous positional arguments, (e.g.,
"{}" and {:d}), and converts them into numbered ones for explicitness and
compatibility with 2.6.
Returns a string with the inferred positional arguments.
"""
# TODO: memoize
ret, max_anon = '', 0
# look for {: or {! or {. or {[ or {}
start, end, prev_end = 0, 0, 0
for match in _pos_farg_re.finditer(fstr):
start, end, group = match.start(), match.end(), match.group()
if prev_end < start:
ret += fstr[prev_end:start]
prev_end = end
if group == '{{' or group == '}}':
ret += group
continue
ret += '{%s%s' % (max_anon, group[1:])
max_anon += 1
ret += fstr[prev_end:]
return ret
# This approach is hardly exhaustive but it works for most builtins
_INTCHARS = 'bcdoxXn'
_FLOATCHARS = 'eEfFgGn%'
_TYPE_MAP = dict([(x, int) for x in _INTCHARS] +
[(x, float) for x in _FLOATCHARS])
_TYPE_MAP['s'] = str
def get_format_args(fstr):
"""
Turn a format string into two lists of arguments referenced by the
format string. One is positional arguments, and the other is named
arguments. Each element of the list includes the name and the
nominal type of the field.
# >>> get_format_args("{noun} is {1:d} years old{punct}")
# ([(1, <type 'int'>)], [('noun', <type 'str'>), ('punct', <type 'str'>)])
# XXX: Py3k
>>> get_format_args("{noun} is {1:d} years old{punct}") == \
([(1, int)], [('noun', str), ('punct', str)])
True
"""
# TODO: memoize
formatter = Formatter()
fargs, fkwargs, _dedup = [], [], set()
def _add_arg(argname, type_char='s'):
if argname not in _dedup:
_dedup.add(argname)
argtype = _TYPE_MAP.get(type_char, str) # TODO: unicode
try:
fargs.append((int(argname), argtype))
except ValueError:
fkwargs.append((argname, argtype))
for lit, fname, fspec, conv in formatter.parse(fstr):
if fname is not None:
type_char = fspec[-1:]
fname_list = re.split('[.[]', fname)
if len(fname_list) > 1:
raise ValueError('encountered compound format arg: %r' % fname)
try:
base_fname = fname_list[0]
assert base_fname
except (IndexError, AssertionError):
raise ValueError('encountered anonymous positional argument')
_add_arg(fname, type_char)
for sublit, subfname, _, _ in formatter.parse(fspec):
# TODO: positional and anon args not allowed here.
if subfname is not None:
_add_arg(subfname)
return fargs, fkwargs
def tokenize_format_str(fstr, resolve_pos=True):
"""Takes a format string, turns it into a list of alternating string
literals and :class:`BaseFormatField` tokens. By default, also
    infers anonymous positional references into explicit, numbered
positional references. To disable this behavior set *resolve_pos*
to ``False``.
"""
ret = []
if resolve_pos:
fstr = infer_positional_format_args(fstr)
formatter = Formatter()
for lit, fname, fspec, conv in formatter.parse(fstr):
if lit:
ret.append(lit)
if fname is None:
continue
ret.append(BaseFormatField(fname, fspec, conv))
return ret
class BaseFormatField(object):
"""A class representing a reference to an argument inside of a
bracket-style format string. For instance, in ``"{greeting},
world!"``, there is a field named "greeting".
These fields can have many options applied to them. See the
Python docs on `Format String Syntax`_ for the full details.
.. _Format String Syntax: https://docs.python.org/2/library/string.html#string-formatting
"""
def __init__(self, fname, fspec='', conv=None):
self.set_fname(fname)
self.set_fspec(fspec)
self.set_conv(conv)
def set_fname(self, fname):
"Set the field name."
path_list = re.split('[.[]', fname) # TODO
self.base_name = path_list[0]
self.fname = fname
self.subpath = path_list[1:]
self.is_positional = not self.base_name or self.base_name.isdigit()
def set_fspec(self, fspec):
"Set the field spec."
fspec = fspec or ''
subfields = []
for sublit, subfname, _, _ in Formatter().parse(fspec):
if subfname is not None:
subfields.append(subfname)
self.subfields = subfields
self.fspec = fspec
self.type_char = fspec[-1:]
self.type_func = _TYPE_MAP.get(self.type_char, str)
def set_conv(self, conv):
"""There are only two built-in converters: ``s`` and ``r``. They are
        somewhat rare and appear like ``"{ref!r}"``."""
# TODO
self.conv = conv
self.conv_func = None # TODO
@property
def fstr(self):
"The current state of the field in string format."
return construct_format_field_str(self.fname, self.fspec, self.conv)
def __repr__(self):
cn = self.__class__.__name__
args = [self.fname]
if self.conv is not None:
args.extend([self.fspec, self.conv])
elif self.fspec != '':
args.append(self.fspec)
args_repr = ', '.join([repr(a) for a in args])
return '%s(%s)' % (cn, args_repr)
def __str__(self):
return self.fstr
_UNSET = object()
class DeferredValue(object):
""":class:`DeferredValue` is a wrapper type, used to defer computing
values which would otherwise be expensive to stringify and
format. This is most valuable in areas like logging, where one
would not want to waste time formatting a value for a log message
which will subsequently be filtered because the message's log
level was DEBUG and the logger was set to only emit CRITICAL
messages.
The :class:``DeferredValue`` is initialized with a callable that
takes no arguments and returns the value, which can be of any
type. By default DeferredValue only calls that callable once, and
future references will get a cached value. This behavior can be
disabled by setting *cache_value* to ``False``.
Args:
func (function): A callable that takes no arguments and
computes the value being represented.
cache_value (bool): Whether subsequent usages will call *func*
again. Defaults to ``True``.
>>> import sys
>>> dv = DeferredValue(lambda: len(sys._current_frames()))
>>> output = "works great in all {0} threads!".format(dv)
PROTIP: To keep lines shorter, use: ``from formatutils import
DeferredValue as DV``
"""
def __init__(self, func, cache_value=True):
self.func = func
        self.cache_value = cache_value
self._value = _UNSET
def get_value(self):
"""Computes, optionally caches, and returns the value of the
*func*. If ``get_value()`` has been called before, a cached
value may be returned depending on the *cache_value* option
passed to the constructor.
"""
if self._value is not _UNSET and self.cache_value:
value = self._value
else:
value = self.func()
if self.cache_value:
self._value = value
return value
def __int__(self):
return int(self.get_value())
def __float__(self):
return float(self.get_value())
def __str__(self):
return str(self.get_value())
def __unicode__(self):
return unicode(self.get_value())
def __repr__(self):
return repr(self.get_value())
def __format__(self, fmt):
value = self.get_value()
pt = fmt[-1:] # presentation type
type_conv = _TYPE_MAP.get(pt, str)
try:
return value.__format__(fmt)
except (ValueError, TypeError):
# TODO: this may be overkill
return type_conv(value).__format__(fmt)
# end formatutils.py
| bsd-3-clause | 5,633,073,641,725,229,000 | 33.340426 | 93 | 0.610816 | false |
iandees/all-the-places | locations/spiders/sunloan.py | 1 | 2377 | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
# yield GeojsonPointItem(
# lat=float(data['geo']['latitude']),
# lon=float(data['geo']['longitude']),
# website=response.url,
# ref=response.url,
# #opening_hours=data['openingHours'],
# addr_full=data['address']['streetAddress'],
# city=data['address']['addressLocality'],
# state=data['address']['addressRegion'],
# postcode=data['address']['postalCode'],
# country='US',
# name=data['name'],
# )
| mit | 2,616,339,130,988,153,000 | 27.987805 | 123 | 0.519983 | false |
zsdonghao/tensorlayer | setup.py | 1 | 5906 | #!/usr/bin/env python
import codecs
import os
import sys
os.environ['TENSORLAYER_PACKAGE_BUILDING'] = 'True'
try:
from setuptools import find_packages, setup, Extension
from setuptools.command.build_ext import build_ext
except ImportError:
from distutils.core import (
setup,
find_packages
)
from tensorlayer import (
__contact_emails__,
__contact_names__,
__description__,
__download_url__,
__homepage__,
__keywords__,
__license__,
__package_name__,
__repository_url__,
__version__
)
# =================== Reading Readme file as TXT files ===================
if os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'),
'r', 'utf-8'
).read()
else:
long_description = 'See ' + __homepage__
# ======================= Reading Requirements files as TXT files =======================
def req_file(filename, folder="requirements"):
with open(os.path.join(folder, filename)) as f:
content = f.readlines()
# you may also want to remove whitespace characters
# Example: `\n` at the end of each line
return [x.strip() for x in content]
# ======================= Defining the requirements var =======================
install_requires = req_file("requirements.txt")
extras_require = {
# User packages
'tf_cpu': req_file("requirements_tf_cpu.txt"),
'tf_gpu': req_file("requirements_tf_gpu.txt"),
'extra': req_file("requirements_extra.txt"),
# Contrib Packages
'contrib_loggers': req_file("requirements_contrib_loggers.txt"),
# Dev Packages
'test': req_file("requirements_test.txt"),
'dev': req_file("requirements_dev.txt"),
'doc': req_file("requirements_doc.txt"),
'db': req_file("requirements_db.txt"),
}
extras_require['all'] = sum([extras_require.get(key) for key in ['extra', 'contrib_loggers']], list())
extras_require['all_cpu'] = sum([extras_require.get(key) for key in ['all', 'tf_cpu']], list())
extras_require['all_gpu'] = sum([extras_require.get(key) for key in ['all', 'tf_gpu']], list())
extras_require['all_dev'] = sum([extras_require.get(key) for key in ['all', 'db', 'dev', 'doc', 'test']], list())
extras_require['all_cpu_dev'] = sum([extras_require.get(key) for key in ['all_dev', 'tf_cpu']], list())
extras_require['all_gpu_dev'] = sum([extras_require.get(key) for key in ['all_dev', 'tf_gpu']], list())
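# Usage note (not part of the original file): the composed groups above are
# consumed as pip extras, e.g. `pip install tensorlayer[all_gpu]` for users or
# `pip install -e .[all_cpu_dev]` for a development checkout.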
cmdclass = dict()
ext_modules = []
# Readthedocs requires TF 1.5.0 to build properly
if 'READTHEDOCS' in os.environ:
ext_modules = [
Extension('install_requirements_for_rtd', []),
]
class custom_build_ext(build_ext):
def build_extensions(self):
os.system('./scripts/install-requirements-for-rtd.sh %s' %
os.path.dirname(sys.executable))
cmdclass = {'build_ext': custom_build_ext}
# ======================= Define the package setup =======================
setup(
name=__package_name__,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=__description__,
long_description=long_description,
# The project's main homepage.
url=__repository_url__,
download_url=__download_url__,
# Author details
author=__contact_names__,
author_email=__contact_emails__,
# maintainer Details
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
# The licence under which the project is released
license=__license__,
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Indicate what your project relates to
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
        # Additional Settings
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
],
keywords=__keywords__,
packages=find_packages(),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=install_requires,
cmdclass=cmdclass,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# $ pip install -e .[test]
extras_require=extras_require,
ext_modules=ext_modules,
scripts=[
'tl',
],
)
| apache-2.0 | 7,494,177,966,048,388,000 | 29.443299 | 113 | 0.613952 | false |
PiaBianca/PyMaster | pymasterlib/ask.py | 1 | 1995 | # PyMaster
# Copyright (C) 2014, 2015 FreedomOfRestriction <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pymasterlib as lib
from pymasterlib.constants import *
def load_text(ID):
return lib.message.load_text("ask", ID)
def what():
m = load_text("ask_what")
c = [load_text("choice_rules"), load_text("choice_chore"),
load_text("choice_punishments"), load_text("choice_nothing")]
choice = lib.message.get_choice(m, c, len(c) - 1)
if choice == 0:
lib.scripts.show_rules()
elif choice == 1:
chore()
elif choice == 2:
punishments()
def chore():
if lib.slave.queued_chore is not None:
lib.message.show(lib.slave.queued_chore["text"])
else:
lib.message.show(load_text("no_chore"))
def punishments():
lib.slave.forget()
punishments_ = []
for i in lib.slave.misdeeds:
for misdeed in lib.slave.misdeeds[i]:
if not misdeed["punished"] and misdeed["punishment"] is not None:
punishments_.append(misdeed["punishment"])
if punishments_:
if len(punishments_) > 1:
m = load_text("punishments").format(len(punishments_))
lib.message.show(m)
for punishment in punishments_:
lib.message.show(punishment["text"])
else:
lib.message.show(load_text("no_punishments"))
| gpl-3.0 | -6,075,815,982,666,345,000 | 30.666667 | 86 | 0.66416 | false |
Noirello/bonsai | docs/conf.py | 1 | 9146 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# bonsai documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 18 21:30:25 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path[0:0] = [os.path.abspath('..')]
# For read-the-docs: mocking the _bonsai module.
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
# A hack to get the objects implemented in C.
if name == 'ldapconnection':
return object
if name == 'ldapentry':
return object
if name == 'ldapsearchiter':
return object
MOCK_MODULES = ['src.bonsai._bonsai', 'bonsai', 'bonsai._bonsai', 'bonsai.asyncio']
try:
import typing
except ImportError:
MOCK_MODULES.append('typing')
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import src.bonsai as bonsai
#from src.bonsai import asyncio as bonsai_asyncio
#from src.bonsai import ldaif as bonsai_ldif
sys.modules['bonsai'] = bonsai
sys.modules['bonsai.asyncio'] = bonsai.asyncio
autodoc_mock_imports = ["gevent"]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bonsai'
copyright = '2014-2020, noirello'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = bonsai.__version__
# The full version, including alpha/beta/rc tags.
release = bonsai.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'classic'
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bonsaidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bonasi.tex', 'bonsai Documentation',
'noirello', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bonsai', 'bonsai Documentation',
['noirello'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bonsai', 'bonsai Documentation',
'noirello', 'bonsai', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -5,116,172,858,655,280,000 | 29.898649 | 83 | 0.702165 | false |
dark1729dragon/pixutils | pixutils/vplayer/PlayImgs.py | 1 | 2169 | from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .videoutils import *
def default_labeller(x):
try:
return int(x.split('_')[1].split('.')[0])
except:
try:
return int(basename(x).split('_')[1].split('.')[0])
except:
return 0
class Im2Video():
'''
def labeller(impath):
return impath.replace('.jpg','').split('_')[1]
vpath = join(dbpath, r'videoimgs/*.*')
cam = Imgs2Video(vpath, labeller)
video = Player(cam)
cam = ThreadIt(cam)
imshow = win(video)
for fno, img in video.play():
imshow('show_video', img, 1)
'''
def __init__(self, opaths, labeller=None):
labeller = labeller or default_labeller
if type(opaths) not in (list, tuple):
paths = glob(opaths)
else:
paths = opaths
if not paths:
raise Exception('No file found in %s' % opaths)
paths = [(int(labeller(path)), path) for path in paths]
self.paths = sorted(paths, key=lambda x: x[0])
self.frameno, self.paths = list(zip(*self.paths))
self.row, self.col = cv2.imread(self.paths[0]).shape[:2]
self.index = -1
def release(self):
pass
def read(self):
self.index += 1
if len(self.paths) <= self.index:
return False, None
try:
return True, cv2.imread(self.paths[self.index])
except:
return None, None
def get(self, i):
if i == 3:
return self.col
elif i == 4:
return self.row
elif i == 5:
return 30
elif i == 7:
return len(self.paths)
def set(self, i, start_frame):
self.index += (start_frame - 1)
def GetFeed(vpath, *a, **kw):
if type(vpath) == int:
return 'stream', cv2.VideoCapture(vpath)
elif type(vpath) in (list,tuple) or '*' in vpath:
return 'imgs', Im2Video(vpath, *a, **kw)
else:
assert exists(vpath), 'Video File missing: %s' % vpath
return 'video', cv2.VideoCapture(vpath) | bsd-2-clause | 8,355,155,217,002,081,000 | 27.552632 | 125 | 0.545874 | false |
jonathf/chaospy | chaospy/distributions/baseclass/operator.py | 1 | 2655 | """Operator transformation."""
import numpy
import chaospy
from ..baseclass import Distribution
class OperatorDistribution(Distribution):
"""Operator transformation."""
def __init__(self, left, right, exclusion=None, repr_args=None):
if not isinstance(left, Distribution):
left = numpy.atleast_1d(left)
if left.ndim > 1:
raise chaospy.UnsupportedFeature(
"distribution operators limited to at-most 1D arrays.")
if not isinstance(right, Distribution):
right = numpy.atleast_1d(right)
if right.ndim > 1:
raise chaospy.UnsupportedFeature(
"distribution operators limited to at-most 1D arrays.")
dependencies, parameters, rotation = chaospy.declare_dependencies(
distribution=self,
parameters=dict(left=left, right=right),
is_operator=True,
)
super(OperatorDistribution, self).__init__(
parameters=parameters,
dependencies=dependencies,
exclusion=exclusion,
repr_args=repr_args,
)
self._cache_copy = {}
self._lower_cache = {}
self._upper_cache = {}
def get_parameters(self, idx, cache, assert_numerical=True):
parameters = super(OperatorDistribution, self).get_parameters(
idx, cache, assert_numerical=assert_numerical)
assert set(parameters) == {"cache", "left", "right", "idx"}
if isinstance(parameters["left"], Distribution):
parameters["left"] = parameters["left"]._get_cache(idx, cache=parameters["cache"], get=0)
elif len(parameters["left"]) > 1 and idx is not None:
parameters["left"] = parameters["left"][idx]
if isinstance(parameters["right"], Distribution):
parameters["right"] = parameters["right"]._get_cache(idx, cache=parameters["cache"], get=0)
elif len(parameters["right"]) > 1 and idx is not None:
parameters["right"] = parameters["right"][idx]
if assert_numerical:
assert (not isinstance(parameters["left"], Distribution) or
not isinstance(parameters["right"], Distribution))
if cache is not self._cache_copy:
self._cache_copy = cache
self._lower_cache = {}
self._upper_cache = {}
if idx is None:
del parameters["idx"]
return parameters
def _cache(self, idx, cache, get):
assert get == 0
parameters = self.get_parameters(idx, cache)
return self._operator(parameters["left"], parameters["right"])
| mit | 8,029,556,839,526,679,000 | 39.227273 | 103 | 0.59548 | false |
usingnamespace/pyramid_authsanity | src/pyramid_authsanity/policy.py | 1 | 6144 | import base64
import os
from pyramid.authorization import Authenticated, Everyone
from pyramid.interfaces import IAuthenticationPolicy, IDebugLogger
from zope.interface import implementer
from .util import _find_services, _session_registered, add_vary_callback
def _clean_principal(princid):
"""Utility function that cleans up the passed in principal
This can easily also be extended for example to make sure that certain
usernames are automatically off-limits.
"""
if princid in (Authenticated, Everyone):
princid = None
return princid
_marker = object()
@implementer(IAuthenticationPolicy)
class AuthServicePolicy(object):
def _log(self, msg, methodname, request):
logger = request.registry.queryUtility(IDebugLogger)
if logger:
cls = self.__class__
classname = cls.__module__ + "." + cls.__name__
methodname = classname + "." + methodname
logger.debug(methodname + ": " + msg)
_find_services = staticmethod(_find_services) # Testing
_session_registered = staticmethod(_session_registered) # Testing
_have_session = _marker
def __init__(self, debug=False):
self.debug = debug
def unauthenticated_userid(self, request):
""" We do not allow the unauthenticated userid to be used. """
def authenticated_userid(self, request):
""" Returns the authenticated userid for this request. """
debug = self.debug
(sourcesvc, authsvc) = self._find_services(request)
request.add_response_callback(add_vary_callback(sourcesvc.vary))
try:
userid = authsvc.userid()
except Exception:
debug and self._log(
"authentication has not yet been completed",
"authenticated_userid",
request,
)
(principal, ticket) = sourcesvc.get_value()
debug and self._log(
"source service provided information: (principal: %r, ticket: %r)"
% (principal, ticket),
"authenticated_userid",
request,
)
# Verify the principal and the ticket, even if None
authsvc.verify_ticket(principal, ticket)
try:
# This should now return None or the userid
userid = authsvc.userid()
except Exception:
userid = None
debug and self._log(
"authenticated_userid returning: %r" % (userid,),
"authenticated_userid",
request,
)
return userid
def effective_principals(self, request):
""" A list of effective principals derived from request. """
debug = self.debug
effective_principals = [Everyone]
userid = self.authenticated_userid(request)
(_, authsvc) = self._find_services(request)
if userid is None:
debug and self._log(
"authenticated_userid returned %r; returning %r"
% (userid, effective_principals),
"effective_principals",
request,
)
return effective_principals
if _clean_principal(userid) is None:
debug and self._log(
(
"authenticated_userid returned disallowed %r; returning %r "
"as if it was None" % (userid, effective_principals)
),
"effective_principals",
request,
)
return effective_principals
effective_principals.append(Authenticated)
effective_principals.append(userid)
effective_principals.extend(authsvc.groups())
debug and self._log(
"returning effective principals: %r" % (effective_principals,),
"effective_principals",
request,
)
return effective_principals
def remember(self, request, principal, **kw):
""" Returns a list of headers that are to be set from the source service. """
debug = self.debug
if self._have_session is _marker:
self._have_session = self._session_registered(request)
prev_userid = self.authenticated_userid(request)
(sourcesvc, authsvc) = self._find_services(request)
request.add_response_callback(add_vary_callback(sourcesvc.vary))
value = {}
value["principal"] = principal
value["ticket"] = ticket = (
base64.urlsafe_b64encode(os.urandom(32)).rstrip(b"=").decode("ascii")
)
debug and self._log(
"Remember principal: %r, ticket: %r" % (principal, ticket),
"remember",
request,
)
authsvc.add_ticket(principal, ticket)
# Clear the previous session
if self._have_session:
if prev_userid != principal:
request.session.invalidate()
else:
# We are logging in the same user that is already logged in, we
# still want to generate a new session, but we can keep the
# existing data
data = dict(request.session.items())
request.session.invalidate()
request.session.update(data)
request.session.new_csrf_token()
return sourcesvc.headers_remember([principal, ticket])
def forget(self, request):
""" A list of headers which will delete appropriate cookies."""
debug = self.debug
if self._have_session is _marker:
self._have_session = self._session_registered(request)
(sourcesvc, authsvc) = self._find_services(request)
request.add_response_callback(add_vary_callback(sourcesvc.vary))
(_, ticket) = sourcesvc.get_value()
debug and self._log("Forgetting ticket: %r" % (ticket,), "forget", request)
authsvc.remove_ticket(ticket)
# Clear the session by invalidating it
if self._have_session:
request.session.invalidate()
return sourcesvc.headers_forget()
| isc | -7,210,039,205,602,359,000 | 32.032258 | 85 | 0.585938 | false |
CroceRossaItaliana/jorvik | formazione/migrations/0035_auto_20190510_1149.py | 1 | 2215 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2019-05-10 11:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('formazione', '0034_auto_20190408_1047'),
]
operations = [
migrations.CreateModel(
name='RelazioneCorso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creazione', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('ultima_modifica', models.DateTimeField(auto_now=True, db_index=True)),
('note_esplicative', models.TextField(help_text='note esplicative in relazione ai cambiamenti effettuati rispetto alla programmazione approvata in fase di pianificazione iniziale del corso.', verbose_name='Note esplicative')),
('raggiungimento_obiettivi', models.TextField(help_text="Analisi sul raggiungimento degli obiettivi del corso (generali rispetto all'evento e specifici di apprendimento).", verbose_name='Raggiungimento degli obiettivi del corso')),
('annotazioni_corsisti', models.TextField(verbose_name='Annotazioni relative alla partecipazione dei corsisti')),
('annotazioni_risorse', models.TextField(help_text='Annotazioni relative a risorse e competenze di particolare rilevanza emerse durante il percorso formativo')),
('annotazioni_organizzazione_struttura', models.TextField(help_text="Annotazioni e segnalazioni sull'organizzazione e la logistica e della struttura ospitante il corso")),
('descrizione_attivita', models.TextField(help_text='Descrizione delle eventuali attività di tirocinio/affiancamento con indicazione dei Tutor')),
('corso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='formazione.CorsoBase')),
],
options={
'verbose_name': 'Relazione del Direttore',
'verbose_name_plural': 'Relazioni dei Direttori',
},
),
]
| gpl-3.0 | 6,304,392,364,393,148,000 | 60.5 | 247 | 0.681572 | false |
kgullikson88/General | Analyze_CCF.py | 1 | 9048 | """
This is a module to read in an HDF5 file with CCFs.
Use this to determine the best parameters, and plot the best CCF for each star/date
"""
from collections import defaultdict
import logging
import h5py
import numpy as np
import pandas as pd
from scipy.interpolate import InterpolatedUnivariateSpline as spline
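# Illustrative usage sketch (not part of the original module); the file name,
# star name and date below are made-up placeholders:
#
#   ccfs = CCF_Interface('cross_correlations.hdf5')
#   ccfs.list_stars(print2screen=True)
#   run = ccfs.get_temperature_run(starname='HIP1234', date='20141001')
#   best = run.loc[run.ccf_value == run.ccf_value.max()]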
class CCF_Interface(object):
def __init__(self, filename, vel=np.arange(-900, 900, 1)):
self.hdf5 = h5py.File(filename, 'r')
self.velocities = vel
self._df = None
def __getitem__(self, path):
return self.hdf5[path]
def list_stars(self, print2screen=False):
"""
List the stars available in the HDF5 file, and the dates available for each
:return: A list of the stars
"""
if print2screen:
for star in sorted(self.hdf5.keys()):
print(star)
for date in sorted(self.hdf5[star].keys()):
print('\t{}'.format(date))
return sorted(self.hdf5.keys())
def list_dates(self, star, print2screen=False):
"""
List the dates available for the given star
:param star: The name of the star
:return: A list of dates the star was observed
"""
if print2screen:
for date in sorted(self.hdf5[star].keys()):
print(date)
return sorted(self.hdf5[star].keys())
def load_cache(self, addmode='simple'):
"""
Read in the whole HDF5 file. This will take a while and take a few Gb of memory, but will speed things up considerably
:keyword addmode: The way the individual CCFs were added. Options are:
- 'simple'
- 'ml'
- 'all' (saves all addmodes)
"""
self._df = self._compile_data(addmode=addmode)
def _compile_data(self, starname=None, date=None, addmode='simple', read_ccf=True):
"""
Private function. This reads in all the datasets for the given star and date
:param starname: the name of the star. Must be in self.hdf5
:param date: The date to search. Must be in self.hdf5[star]
:keyword addmode: The way the individual CCFs were added. Options are:
- 'simple'
- 'ml'
- 'all' (saves all addmodes)
:return: a pandas DataFrame with the columns:
- star
- date
- temperature
- log(g)
- [Fe/H]
- vsini
- addmode
- rv (at maximum CCF value)
- CCF height (maximum)
"""
if starname is None:
df_list = []
star_list = self.list_stars()
for star in star_list:
date_list = self.list_dates(star)
for date in date_list:
logging.debug('Reading in metadata for star {}, date {}'.format(star, date))
df_list.append(self._compile_data(star, date, addmode=addmode, read_ccf=read_ccf))
return pd.concat(df_list, ignore_index=True)
elif starname is not None and date is None:
df_list = []
date_list = self.list_dates(starname)
for date in date_list:
logging.debug('Reading in metadata for date {}'.format(date))
df_list.append(self._compile_data(starname, date, addmode=addmode, read_ccf=read_ccf))
return pd.concat(df_list, ignore_index=True)
else:
if self._df is not None:
return self._df.loc[(self._df['Star'] == starname) & (self._df['Date'] == date)].copy()
#print('Stars: ', self.list_stars())
datasets = self.hdf5[starname][date].keys()
data = defaultdict(list)
for ds_name, ds in self.hdf5[starname][date].iteritems(): # in datasets:
#ds = self.hdf5[starname][date][ds_name]
try:
am = ds.attrs['addmode']
if addmode == 'all' or addmode == am:
data['T'].append(ds.attrs['T'])
data['logg'].append(ds.attrs['logg'])
data['[Fe/H]'].append(ds.attrs['[Fe/H]'])
data['vsini'].append(ds.attrs['vsini'])
data['addmode'].append(am)
data['name'].append(ds.name)
try:
data['ccf_max'].append(ds.attrs['ccf_max'])
data['vel_max'].append(ds.attrs['vel_max'])
except KeyError:
vel, corr = ds.value
idx = np.argmax(corr)
data['ccf_max'].append(corr[idx])
data['vel_max'].append(vel[idx])
if read_ccf:
v = ds.value
vel, corr = v[0], v[1]
sorter = np.argsort(vel)
fcn = spline(vel[sorter], corr[sorter])
data['ccf'].append(fcn(self.velocities))
except:
raise IOError('Something weird happened with dataset {}!'.format(ds.name))
data['Star'] = [starname] * len(data['T'])
data['Date'] = [date] * len(data['T'])
df = pd.DataFrame(data=data)
return df
def get_temperature_run(self, starname=None, date=None, df=None):
"""
Return the maximum ccf height for each temperature. Either starname AND date, or df must be given
:param starname: The name of the star
:param date: The date of the observation
:param df: Input dataframe, such as from _compile_data. Overrides starname and date, if given
:return: a pandas DataFrame with all the best parameters for each temperature
"""
# Get the dataframe if it isn't given
if df is None:
if starname is None or date is None:
raise ValueError('Must give either starname or date to get_temperature_run!')
df = self._compile_data(starname, date)
# Find the maximum CCF for each set of parameters
fcn = lambda row: (np.max(row), self.velocities[np.argmax(row)])
vals = df['ccf'].map(fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['rv'] = vals.map(lambda l: l[1])
# Find the best parameters for each temperature
d = defaultdict(list)
temperatures = pd.unique(df['T'])
for T in temperatures:
good = df.loc[df['T'] == T]
best = good.loc[good.ccf_max == good.ccf_max.max()]
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
d['rv'].append(best['rv'].item())
d['ccf_value'].append(best.ccf_max.item())
d['T'].append(T)
d['metal'].append(best['[Fe/H]'].item())
return pd.DataFrame(data=d)
def get_ccf(self, params, df=None):
"""
Get the ccf with the given parameters. A dataframe can be given to speed things up
:param params: All the parameters necessary to define a single ccf. This should be
a python dictionary with the keys:
- 'starname': The name of the star. Try self.list_stars() for the options.
- 'date': The UT date of the observations. Try self.list_dates() for the options.
- 'T': temperature of the model
- 'logg': the log(g) of the model
- 'vsini': the vsini by which the model was broadened before correlation
- '[Fe/H]': the metallicity of the model
- 'addmode': The way the order CCFs were added to make a total one. Can be:
- 'simple'
- 'ml'
- 'weighted'
- 'dc'
:param df: a pandas DataFrame such as outputted by _compile_data
:return: a pandas DataFrame with columns of velocity and CCF power
"""
if df is None:
try:
df = self._compile_data(params['starname'], params['date'])
except KeyError:
raise KeyError('Must give get_ccf params with starname and date keywords, if df is not given!')
Tvals = df['T'].unique()
T = Tvals[np.argmin(abs(Tvals - params['T']))]
good = df.loc[(df['T'] == T) & (df.logg == params['logg']) & (df.vsini == params['vsini']) \
& (df['[Fe/H]'] == params['[Fe/H]']) & (df.addmode == params['addmode'])]
return pd.DataFrame(data={'velocity': self.velocities, 'CCF': good['ccf'].item()})
| gpl-3.0 | 5,668,383,523,431,874,000 | 42.710145 | 126 | 0.513263 | false |
clausqr/HTPC-Manager | modules/qbittorrent.py | 1 | 12490 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import htpc
import cherrypy
import json
import logging
import time
import math
from cherrypy.lib.auth2 import require
from htpc.helpers import striphttp, sizeof
import requests
from requests.auth import HTTPDigestAuth
class Qbittorrent(object):
session = requests.Session()
def __init__(self):
self.logger = logging.getLogger('modules.qbittorrent')
self.newapi = False
self.authenticated = False
self.testapi = None
htpc.MODULES.append({
'name': 'qBittorrent',
'id': 'qbittorrent',
'test': htpc.WEBDIR + 'qbittorrent/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'qbittorrent_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'qbittorrent_name'},
{'type': 'text', 'label': 'IP / Host', 'placeholder': 'localhost', 'name': 'qbittorrent_host'},
{'type': 'text', 'label': 'Port', 'placeholder': '8080', 'name': 'qbittorrent_port'},
{'type': 'text', 'label': 'Username', 'name': 'qbittorrent_username'},
{'type': 'password', 'label': 'Password', 'name': 'qbittorrent_password'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'qbittorrent_ssl'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc': 'Reverse proxy link ex: https://qbt.domain.com', 'name': 'qbittorrent_reverse_proxy_link'},
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('qbittorrent.html').render(scriptname='qbittorrent', webinterface=self.webinterface())
def webinterface(self):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl', 0) else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
if htpc.settings.get('qbittorrent_reverse_proxy_link'):
url = htpc.settings.get('qbittorrent_reverse_proxy_link')
return url
def qbturl(self):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl', 0) else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
return url
@cherrypy.expose()
@require()
def login(self):
self.logger.debug('Trying to login to qbittorrent')
try:
d = {'username': htpc.settings.get('qbittorrent_username', ''),
'password': htpc.settings.get('qbittorrent_password', '')
}
# F33d da cookie monster
r = self.session.post(self.qbturl() + 'login', data=d, verify=False, timeout=5)
if r.content == 'Ok.':
self.logger.debug('Successfully logged in with new api')
self.authenticated = True
self.newapi = True
else:
self.logger.error('Check your username and password')
return r.content
except Exception as e:
self.logger.error('Failed to auth with new api %s' % e)
return
def _fetch(self, u, post=False, params={}, data=None):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl') else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
username = htpc.settings.get('qbittorrent_username', '')
password = htpc.settings.get('qbittorrent_password', '')
url += u
if self.testapi is None:
self.ping()
if self.newapi:
if self.authenticated is False:
self.login()
if post:
if self.newapi:
r = self.session.post(url, data=data, verify=False, timeout=8)
else:
r = self.session.post(url, data=data, verify=False, timeout=8, auth=HTTPDigestAuth(username, password))
else:
if self.newapi:
r = self.session.get(url, verify=False, timeout=8)
else:
r = self.session.get(url, verify=False, timeout=8, auth=HTTPDigestAuth(username, password))
return r
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def fetch(self):
try:
if self.newapi:
result = self._fetch('query/torrents?filter=all&sort=size&reverse=false')
torrents = result.json()
l = []
for torrent in torrents:
t = {}
for k, v in torrent.items():
t[k] = v
if k == 'size':
t['size'] = sizeof(int(v))
if k == 'eta':
eta = time.strftime('%H:%M:%S', time.gmtime(v))
if eta == '00:00:00':
eta = u'\u221E'
t['eta'] = eta
if k == 'ratio':
t['ratio'] = math.ceil(v)
l.append(t)
return l
else:
result = self._fetch('json/torrents')
# r.json() does not like the infinity
return json.loads(result.content)
except Exception as e:
self.logger.error("Couldn't get torrents %s" % e)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_speed(self):
''' Get total download and upload speed '''
try:
d = {}
if not self.newapi:
result = self._fetch('json/transferInfo/')
result = result.json()
speeddown = result['dl_info']
speedup = result['up_info']
list_of_down = speeddown.split()
list_of_up = speedup.split()
ds = list_of_down[1] + ' ' + list_of_down[2]
dlstat = list_of_down[5] + ' ' + list_of_down[6]
us = list_of_up[1] + ' ' + list_of_up[2]
ulstat = list_of_down[5] + ' ' + list_of_down[6]
d = {
'qbittorrent_speed_down': ds,
'qbittorrent_speed_up': us,
'qbittorrent_total_dl': dlstat,
'qbittorrent_total_ul': ulstat
}
else:
# new api stuff
result = self._fetch('query/transferInfo')
result = result.json()
d = {
'qbittorrent_speed_down': sizeof(result['dl_info_speed']),
'qbittorrent_speed_up': sizeof(result['up_info_speed']),
'qbittorrent_total_dl': sizeof(result['dl_info_data']),
'qbittorrent_total_ul': sizeof(result['up_info_data'])
}
return d
except Exception as e:
self.logger.error("Couldn't get total download and uploads speed %s" % e)
def get_global_dl_limit(self):
try:
result = self._fetch('command/getGlobalDlLimit/')
speed = int(result.content)
speed /= 1024
return speed
except Exception as e:
self.logger.error("Couldn't get global download limit %s" % e)
def get_global_ul_limit(self):
try:
result = self._fetch('command/getGlobalUpLimit')
speed = int(result.content)
speed /= 1024
return speed
except Exception as e:
self.logger.error("Couldn't get global upload limit %s" % e)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_global_limit(self):
try:
d = {}
d['dl_limit'] = self.get_global_dl_limit()
d['ul_limit'] = self.get_global_ul_limit()
return d
except Exception as e:
self.logger.debug("Couldn't get global upload and download limits %s" % e)
@cherrypy.expose()
@require()
def command(self, cmd=None, hash=None, name=None, dlurl=None):
''' Handles pause, resume, delete singel torrents '''
try:
self.logger.debug('%s %s' % (cmd, name))
data = {}
if cmd == 'delete':
data['hashes'] = hash
elif cmd == 'download':
data['urls'] = dlurl
elif cmd == 'resumeall' or cmd == 'pauseall':
# this does not work, bug in qbt see
# https://github.com/qbittorrent/qBittorrent/issues/3016
if self.newapi:
cmd = cmd[:-3] + 'All'
else:
data['hash'] = hash
url = 'command/%s' % cmd
# data is form encode..
r = self._fetch(url, post=True, data=data)
return r.content
except Exception as e:
self.logger.error('Failed at %s %s %s %s' % (cmd, name, hash, e))
@cherrypy.expose()
@require()
def to_client(self, link, torrentname, **kwargs):
''' Is used by torrent search '''
try:
url = 'command/download/'
data = {}
data['urls'] = link
            self.logger.info('%s %s is sent to qBittorrent' % (torrentname, link))
            return self._fetch(url, data=data, post=True)
except Exception as e:
self.logger.error('Failed to send %s %s to qBittorrent %s' % (link, torrentname, e))
@cherrypy.expose()
@require()
def set_speedlimit(self, type=None, speed=None):
''' Sets global upload and download speed '''
try:
self.logger.debug('Setting %s to %s' % (type, speed))
speed = int(speed)
if speed == 0:
speed = 0
else:
speed = speed * 1024
url = 'command/' + type + '/'
data = {}
data['limit'] = speed
r = self._fetch(url, data=data, post=True)
return r.content
except Exception as e:
self.logger.error('Failed to set %s to %s %s' % (type, speed, e))
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def ping(self, qbittorrent_host='', qbittorrent_port='', qbittorrent_username='', qbittorrent_password='', qbittorrent_ssl=False, **kw):
        self.logger.debug('Trying to connect to qBittorrent')
host = qbittorrent_host or htpc.settings.get('qbittorrent_host')
port = qbittorrent_port or htpc.settings.get('qbittorrent_port')
username = qbittorrent_username or htpc.settings.get('qbittorrent_username')
password = qbittorrent_password or htpc.settings.get('qbittorrent_password')
ssl = 's' if qbittorrent_ssl or htpc.settings.get('qbittorrent_ssl') else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
self.newapi = False
self.authenticated = False
try:
# We assume that its atleast 3.2 if this works.
r = requests.get(url + 'version/api', timeout=8, verify=False)
self.logger.debug('Trying to connect with new API %s' % r.url)
# Old api returns a empty page
if r.content != '' and r.ok:
self.newapi = r.content
self.testapi = True
return r.content
else:
raise requests.ConnectionError
except Exception as e:
self.logger.debug('Failed to figure out what api version, trying old API')
try:
r = requests.post(url + 'json/torrents', auth=HTTPDigestAuth(username, password), timeout=10, verify=False)
if r.ok:
self.logger.debug('Old API works %s' % r.url)
# Disable new api stuff
self.testapi = True
self.newapi = False
self.authenticated = False
except Exception as e:
self.newapi = False
self.authenticated = False
self.logger.debug('Failed to contact qBittorrent via old and newapi')
                self.logger.error("Can't contact qBittorrent, check your settings and try again %s" % e)
| mit | -863,452,485,712,000,900 | 36.507508 | 182 | 0.517374 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/swap.py | 4 | 9829 | """Swap edges in a graph.
"""
import math
from networkx.utils import py_random_state
import networkx as nx
__all__ = ["double_edge_swap", "connected_double_edge_swap"]
@py_random_state(3)
def double_edge_swap(G, nswap=1, max_tries=100, seed=None):
"""Swap two edges in the graph while keeping the node degrees fixed.
A double-edge swap removes two randomly chosen edges u-v and x-y
and creates the new edges u-x and v-y::
u--v u v
becomes | |
x--y x y
If either the edge u-x or v-y already exist no swap is performed
and another attempt is made to find a suitable edge pair.
Parameters
----------
G : graph
An undirected graph
nswap : integer (optional, default=1)
Number of double-edge swaps to perform
max_tries : integer (optional)
Maximum number of attempts to swap edges
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : graph
The graph after double edge swaps.
Notes
-----
Does not enforce any connectivity constraints.
The graph G is modified in place.
"""
if G.is_directed():
raise nx.NetworkXError("double_edge_swap() not defined for directed graphs.")
if nswap > max_tries:
raise nx.NetworkXError("Number of swaps > number of tries allowed.")
if len(G) < 4:
raise nx.NetworkXError("Graph has less than four nodes.")
# Instead of choosing uniformly at random from a generated edge list,
# this algorithm chooses nonuniformly from the set of nodes with
# probability weighted by degree.
n = 0
swapcount = 0
keys, degrees = zip(*G.degree()) # keys, degree
cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree
discrete_sequence = nx.utils.discrete_sequence
while swapcount < nswap:
# if random.random() < 0.5: continue # trick to avoid periodicities?
# pick two random edges without creating edge list
# choose source node indices from discrete distribution
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
if ui == xi:
continue # same source, skip
u = keys[ui] # convert index to label
x = keys[xi]
# choose target uniformly from neighbors
v = seed.choice(list(G[u]))
y = seed.choice(list(G[x]))
if v == y:
continue # same target, skip
if (x not in G[u]) and (y not in G[v]): # don't create parallel edges
G.add_edge(u, x)
G.add_edge(v, y)
G.remove_edge(u, v)
G.remove_edge(x, y)
swapcount += 1
if n >= max_tries:
e = (
f"Maximum number of swap attempts ({n}) exceeded "
f"before desired swaps achieved ({nswap})."
)
raise nx.NetworkXAlgorithmError(e)
n += 1
return G
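# Illustrative sketch (not part of the original source): double-edge swaps are
# degree preserving, so the degree sequence is unchanged by the call below.
#
#   G = nx.gnm_random_graph(20, 40, seed=42)
#   before = sorted(d for _, d in G.degree())
#   double_edge_swap(G, nswap=10, max_tries=1000, seed=1)
#   assert sorted(d for _, d in G.degree()) == before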
@py_random_state(3)
def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
"""Attempts the specified number of double-edge swaps in the graph `G`.
A double-edge swap removes two randomly chosen edges `(u, v)` and `(x,
y)` and creates the new edges `(u, x)` and `(v, y)`::
u--v u v
becomes | |
x--y x y
If either `(u, x)` or `(v, y)` already exist, then no swap is performed
so the actual number of swapped edges is always *at most* `nswap`.
Parameters
----------
G : graph
An undirected graph
nswap : integer (optional, default=1)
Number of double-edge swaps to perform
_window_threshold : integer
The window size below which connectedness of the graph will be checked
after each swap.
The "window" in this function is a dynamically updated integer that
represents the number of swap attempts to make before checking if the
graph remains connected. It is an optimization used to decrease the
running time of the algorithm in exchange for increased complexity of
implementation.
If the window size is below this threshold, then the algorithm checks
after each swap if the graph remains connected by checking if there is a
path joining the two nodes whose edge was just removed. If the window
size is above this threshold, then the algorithm performs do all the
swaps in the window and only then check if the graph is still connected.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
int
The number of successful swaps
Raises
------
NetworkXError
If the input graph is not connected, or if the graph has fewer than four
nodes.
Notes
-----
The initial graph `G` must be connected, and the resulting graph is
connected. The graph `G` is modified in place.
References
----------
.. [1] C. Gkantsidis and M. Mihail and E. Zegura,
The Markov chain simulation method for generating connected
power law random graphs, 2003.
http://citeseer.ist.psu.edu/gkantsidis03markov.html
"""
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected")
if len(G) < 4:
raise nx.NetworkXError("Graph has less than four nodes.")
n = 0
swapcount = 0
deg = G.degree()
# Label key for nodes
dk = list(n for n, d in G.degree())
cdf = nx.utils.cumulative_distribution(list(d for n, d in G.degree()))
discrete_sequence = nx.utils.discrete_sequence
window = 1
while n < nswap:
wcount = 0
swapped = []
# If the window is small, we just check each time whether the graph is
# connected by checking if the nodes that were just separated are still
# connected.
if window < _window_threshold:
# This Boolean keeps track of whether there was a failure or not.
fail = False
while wcount < window and n < nswap:
# Pick two random edges without creating the edge list. Choose
# source nodes from the discrete degree distribution.
(ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
# If the source nodes are the same, skip this pair.
if ui == xi:
continue
# Convert an index to a node label.
u = dk[ui]
x = dk[xi]
# Choose targets uniformly from neighbors.
v = seed.choice(list(G.neighbors(u)))
y = seed.choice(list(G.neighbors(x)))
# If the target nodes are the same, skip this pair.
if v == y:
continue
if x not in G[u] and y not in G[v]:
G.remove_edge(u, v)
G.remove_edge(x, y)
G.add_edge(u, x)
G.add_edge(v, y)
swapped.append((u, v, x, y))
swapcount += 1
n += 1
# If G remains connected...
if nx.has_path(G, u, v):
wcount += 1
# Otherwise, undo the changes.
else:
G.add_edge(u, v)
G.add_edge(x, y)
G.remove_edge(u, x)
G.remove_edge(v, y)
swapcount -= 1
fail = True
# If one of the swaps failed, reduce the window size.
if fail:
window = int(math.ceil(window / 2))
else:
window += 1
# If the window is large, then there is a good chance that a bunch of
# swaps will work. It's quicker to do all those swaps first and then
# check if the graph remains connected.
else:
while wcount < window and n < nswap:
# Pick two random edges without creating the edge list. Choose
# source nodes from the discrete degree distribution.
                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
# If the source nodes are the same, skip this pair.
if ui == xi:
continue
# Convert an index to a node label.
u = dk[ui]
x = dk[xi]
# Choose targets uniformly from neighbors.
v = seed.choice(list(G.neighbors(u)))
y = seed.choice(list(G.neighbors(x)))
# If the target nodes are the same, skip this pair.
if v == y:
continue
if x not in G[u] and y not in G[v]:
G.remove_edge(u, v)
G.remove_edge(x, y)
G.add_edge(u, x)
G.add_edge(v, y)
swapped.append((u, v, x, y))
swapcount += 1
n += 1
wcount += 1
# If the graph remains connected, increase the window size.
if nx.is_connected(G):
window += 1
# Otherwise, undo the changes from the previous window and decrease
# the window size.
else:
while swapped:
(u, v, x, y) = swapped.pop()
G.add_edge(u, v)
G.add_edge(x, y)
G.remove_edge(u, x)
G.remove_edge(v, y)
swapcount -= 1
window = int(math.ceil(window / 2))
return swapcount
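if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage sketch added for clarity; it is not part of the
    # original NetworkX module, and the graph size and swap counts are
    # arbitrary assumptions. Both routines rewire edges while preserving
    # every node's degree, which the final assertion checks.
    G = nx.gnm_random_graph(30, 60, seed=1)
    degrees = sorted(d for _, d in G.degree())
    if nx.is_connected(G):
        connected_double_edge_swap(G, nswap=5, seed=1)
    else:
        double_edge_swap(G, nswap=5, max_tries=500, seed=1)
    assert degrees == sorted(d for _, d in G.degree())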
| gpl-3.0 | 5,574,572,882,945,417,000 | 35.539033 | 85 | 0.550005 | false |
durandj/ynot-django | ynot/django/themes/templatetags/breadcrumbs.py | 1 | 2421 | from django import template as django_template
from django.template import defaulttags as django_defaulttags
from django.utils import encoding as django_encoding
# pylint: disable=invalid-name, too-few-public-methods
register = django_template.Library()
# pylint: disable=unused-argument
@register.tag
def breadcrumb(parser, token):
"""
Render breadcrumbs in the form of:
{% breadcrumb 'Breadcrumb title' url %}
"""
return BreadcrumbNode(token.split_contents()[1:])
# pylint: enable=unused-argument
@register.tag
def breadcrumb_url(parser, token):
"""
Render breadcrumbs in the form of:
{% breadcrumb 'Breadcrumb title' url args %}
"""
contents = token.split_contents()
if len(contents) == 2:
return breadcrumb(parser, token) # Shortcut to normal breadcrumbs
title = contents.pop(1)
token.contents = ' '.join(contents)
url = django_defaulttags.url(parser, token)
return UrlBreadcrumbNode(title, url)
class BreadcrumbNode(django_template.Node):
def __init__(self, args):
self.args = [django_template.Variable(arg) for arg in args]
def render(self, context):
title = self.args[0].var
if title.find('\'') == -1 and title.find('\"') == -1:
try:
val = self.args[0]
title = val.resolve(context)
            except django_template.VariableDoesNotExist:
title = ''
else:
title = django_encoding.smart_unicode(title.strip('\'').strip('\"'))
url = None
if len(self.args) > 1:
val = self.args[1]
try:
url = val.resolve(context)
except django_template.VariableDoesNotExist:
url = None
return render_breadcrumb(title, url = url)
class UrlBreadcrumbNode(django_template.Node):
def __init__(self, title, url_node):
self.title = django_template.Variable(title)
self.url_node = url_node
def render(self, context):
title = self.title.var
if title.find('\'') == -1 and title.find('\"') == -1:
try:
val = self.title
title = val.resolve(context)
            except django_template.VariableDoesNotExist:
title = ''
else:
title = django_encoding.smart_unicode(title.strip('\'').strip('\"'))
url = self.url_node.render(context)
return render_breadcrumb(title, url = url)
def render_breadcrumb(title, url = None):
    if url:
        breadcrumb_node = '<a href="{url}">{title}</a>'.format(
            title = title,
            url = url,
        )
    else:
        breadcrumb_node = '<span>{title}</span>'.format(title = title)
    breadcrumb_node = '<span class="ynot-breadcrumb">{}</span>'.format(breadcrumb_node)
    return breadcrumb_node
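# Illustrative template usage (added for clarity, not part of the original
# module). The load name and URL below are assumptions about how this tag
# library is registered in a project:
#
#   {% load breadcrumbs %}
#   {% breadcrumb 'Home' '/' %}
#   {% breadcrumb_url 'Profile' profile_detail user.id %}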
| mit | -5,690,095,699,108,651,000 | 24.21875 | 84 | 0.674515 | false |
edx/edx-enterprise | enterprise/migrations/0111_pendingenterprisecustomeradminuser.py | 1 | 3009 | # Generated by Django 2.2.15 on 2020-09-09 14:31
import simple_history.models
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('enterprise', '0110_add_default_contract_discount'),
]
operations = [
migrations.CreateModel(
name='HistoricalPendingEnterpriseCustomerAdminUser',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('user_email', models.EmailField(max_length=254)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('enterprise_customer', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='enterprise.EnterpriseCustomer')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'history_date',
'verbose_name': 'historical pending enterprise customer admin user',
'ordering': ('-history_date', '-history_id'),
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='PendingEnterpriseCustomerAdminUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('user_email', models.EmailField(max_length=254)),
('enterprise_customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='enterprise.EnterpriseCustomer')),
],
options={
'unique_together': {('enterprise_customer', 'user_email')},
'ordering': ['created'],
},
),
]
| agpl-3.0 | -8,918,742,934,730,054,000 | 52.732143 | 205 | 0.627783 | false |
blueshed/blueshed-micro | blueshed/micro/utils/executor.py | 1 | 2437 | from blueshed.micro.utils import resources
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from tornado.autoreload import add_reload_hook
from functools import wraps
import logging
import os
import inspect
from concurrent.futures.process import ProcessPoolExecutor
LOGGER = logging.getLogger(__name__)
_pool_ = None
def pool_init(pool):
global _pool_
_pool_ = pool
def pool_init_processes(pool_size, debug=False):
micro_pool = ProcessPoolExecutor(pool_size)
pool_init(micro_pool)
if debug is True:
add_reload_hook(micro_pool.shutdown)
logging.info("pool intialized with %s processes", pool_size)
return micro_pool
def global_pool():
global _pool_
return _pool_
def register_pool(name, pool):
resources.set_resource(name, pool)
def has_micro_context(f):
for k, v in inspect.signature(f).parameters.items():
if v.annotation == 'micro_context':
return k
def run_in_pool(_pid, _f, _has_context, context, *args, **kwargs):
    # The worker process inherits the parent's tornado IOLoop globals,
    # so clear them before running the wrapped callable.
subprocess = os.getpid() != _pid
if subprocess and IOLoop.current(False):
LOGGER.debug("clearing tornado globals")
IOLoop.clear_current()
IOLoop.clear_instance()
LOGGER.debug("running %s %s", os.getpid(), context)
if _has_context:
kwargs[_has_context] = context
result = _f(*args, **kwargs)
if not subprocess:
return result
if isinstance(result, Future):
LOGGER.debug('running up tornado to complete')
def done(*args, **kwargs):
LOGGER.debug('stopping tornado')
IOLoop.current().stop()
result.add_done_callback(done)
IOLoop.current().start()
result = result.result()
return context, result
def pool(_f, resource_name=None):
has_context = has_micro_context(_f)
@wraps(_f)
def call(_f, context, *args, **kwargs):
global _pool_
        pool = None  # fall back to running inline when no pool is configured
        if resource_name:
pool = resources.get_resource(resource_name)
elif _pool_:
pool = _pool_
if pool:
result = pool.submit(run_in_pool, os.getpid(), _f,
has_context, context, *args, **kwargs)
else:
if has_context:
kwargs[has_context] = context
result = _f(*args, **kwargs)
return result
return call
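# Illustrative usage sketch (added for clarity, not part of the original
# module). The pool size and debug flag are assumptions; the usual wiring is
# to create the shared process pool once at start-up so that @pool-decorated
# callables are routed through it, and to shut it down on exit:
#
#   micro_pool = pool_init_processes(4, debug=False)
#   ...
#   micro_pool.shutdown()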
| mit | -5,598,329,544,635,889,000 | 26.077778 | 71 | 0.623718 | false |
espressofiend/NCIL-SOC-2015 | PsychoPy/stroop_lastrun.py | 1 | 14481 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.82.00), Mon Jun 22 22:53:33 2015
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
expName = u'stroop' # from the Builder filename that created this script
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=u'/Users/aaron/Documents/GitHub/NCIL-SOC-2015/PsychoPy/stroop.psyexp',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detailed verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(2560, 1440), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
monitor=u'testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
)
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
frameDur = 1.0/round(expInfo['frameRate'])
else:
frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "instr"
instrClock = core.Clock()
instructionText = visual.TextStim(win=win, ori=0, name='instructionText',
text=u'Press left arrow if colour matches word\n\nPress right arrow if colour does not match word\n\nPress either arrow to start.', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "trial"
trialClock = core.Clock()
ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')
text = visual.TextStim(win=win, ori=0, name='text',
text=u'XXXXXX', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'white', colorSpace='rgb', opacity=1,
depth=-1.0)
text_2 = visual.TextStim(win=win, ori=0, name='text_2',
text=u'+', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'blue', colorSpace='rgb', opacity=1,
depth=-2.0)
text_3 = visual.TextStim(win=win, ori=0, name='text_3',
text='default text', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=1.0, colorSpace='rgb', opacity=1,
depth=-3.0)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
#------Prepare to start Routine "instr"-------
t = 0
instrClock.reset() # clock
frameN = -1
# update component parameters for each repeat
key_resp_3 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_3.status = NOT_STARTED
# keep track of which components have finished
instrComponents = []
instrComponents.append(instructionText)
instrComponents.append(key_resp_3)
for thisComponent in instrComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "instr"-------
continueRoutine = True
while continueRoutine:
# get current time
t = instrClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instructionText* updates
if t >= 0.0 and instructionText.status == NOT_STARTED:
# keep track of start time/frame for later
instructionText.tStart = t # underestimates by a little under one frame
instructionText.frameNStart = frameN # exact frame index
instructionText.setAutoDraw(True)
# *key_resp_3* updates
if t >= 0.0 and key_resp_3.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_3.tStart = t # underestimates by a little under one frame
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.status = STARTED
# keyboard checking is just starting
key_resp_3.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_3.status == STARTED:
theseKeys = event.getKeys(keyList=['left', 'right'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_3.keys = theseKeys[-1] # just the last key pressed
key_resp_3.rt = key_resp_3.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
routineTimer.reset() # if we abort early the non-slip timer needs reset
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instrComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
else: # this Routine was not non-slip safe so reset non-slip timer
routineTimer.reset()
#-------Ending Routine "instr"-------
for thisComponent in instrComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys=None
# store data for thisExp (ExperimentHandler)
thisExp.addData('key_resp_3.keys',key_resp_3.keys)
if key_resp_3.keys != None: # we had a response
thisExp.addData('key_resp_3.rt', key_resp_3.rt)
thisExp.nextEntry()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1, method='random',
extraInfo=expInfo, originPath=u'/Users/aaron/Documents/GitHub/NCIL-SOC-2015/PsychoPy/stroop.psyexp',
trialList=data.importConditions(u'psychopy_playing_conditions.xlsx'),
seed=666, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
#------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
routineTimer.add(5.000000)
# update component parameters for each repeat
text_3.setColor(colour, colorSpace='rgb')
text_3.setText(word)
key_resp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_2.status = NOT_STARTED
# keep track of which components have finished
trialComponents = []
trialComponents.append(ISI)
trialComponents.append(text)
trialComponents.append(text_2)
trialComponents.append(text_3)
trialComponents.append(key_resp_2)
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "trial"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t # underestimates by a little under one frame
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
if text.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
text.setAutoDraw(False)
# *text_2* updates
if t >= 1.5 and text_2.status == NOT_STARTED:
# keep track of start time/frame for later
text_2.tStart = t # underestimates by a little under one frame
text_2.frameNStart = frameN # exact frame index
text_2.setAutoDraw(True)
if text_2.status == STARTED and t >= (1.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
text_2.setAutoDraw(False)
# *text_3* updates
if t >= 3 and text_3.status == NOT_STARTED:
# keep track of start time/frame for later
text_3.tStart = t # underestimates by a little under one frame
text_3.frameNStart = frameN # exact frame index
text_3.setAutoDraw(True)
if text_3.status == STARTED and t >= (3 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left
text_3.setAutoDraw(False)
# *key_resp_2* updates
if t >= 3 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t # underestimates by a little under one frame
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
key_resp_2.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_2.status == STARTED and t >= (3 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left
key_resp_2.status = STOPPED
if key_resp_2.status == STARTED:
theseKeys = event.getKeys(keyList=['left', 'right'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_2.keys = theseKeys[-1] # just the last key pressed
key_resp_2.rt = key_resp_2.clock.getTime()
# was this 'correct'?
if (key_resp_2.keys == str(corrAns)) or (key_resp_2.keys == corrAns):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# a response ends the routine
continueRoutine = False
# *ISI* period
if t >= 0.0 and ISI.status == NOT_STARTED:
# keep track of start time/frame for later
ISI.tStart = t # underestimates by a little under one frame
ISI.frameNStart = frameN # exact frame index
ISI.start(0.5)
elif ISI.status == STARTED: #one frame should pass before updating params and completing
ISI.complete() #finish the static period
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
routineTimer.reset() # if we abort early the non-slip timer needs reset
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none': key_resp_2.corr = 1 # correct non-response
else: key_resp_2.corr = 0 # failed to respond (incorrectly)
# store data for trials (TrialHandler)
trials.addData('key_resp_2.keys',key_resp_2.keys)
trials.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
trials.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.nextEntry()
# completed 1 repeats of 'trials'
win.close()
core.quit()
| mit | 883,825,306,107,728,600 | 43.832817 | 153 | 0.653477 | false |
google-research/graph-attribution | tests/test_graphnet_techniques.py | 1 | 4088 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for graphnet_techniques."""
from absl.testing import absltest
from absl.testing import parameterized
import graph_nets
import numpy as np
import tensorflow as tf
import experiments
import featurization
import graphnet_models as models
import graphnet_techniques as techniques
import graphs as graph_utils
import templates
class AttributionTechniquesTests(parameterized.TestCase):
"""Test attribution interface correctness."""
def _setup_graphs_model(self):
"""Setup graphs and smiles if needed."""
tensorizer = featurization.MolTensorizer()
smiles = ['CO', 'CCC', 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C']
graphs = graph_utils.smiles_to_graphs_tuple(smiles, tensorizer)
# Fix seed so that initialization is deterministic.
tf.random.set_seed(0)
model = experiments.GNN(5, 3, 10, 1, models.BlockType('gcn'), 'relu',
templates.TargetType.globals, 3)
model(graphs)
return graphs, model, tensorizer
def _setup_technique(self, name, tensorizer):
"""Setup attribution techniques."""
methods = techniques.get_techniques_dict(*tensorizer.get_null_vectors())
return methods[name]
def assertAttribution(self, graphs, atts):
atts = graph_nets.utils_tf.concat(atts, axis=0)
self.assertEqual(atts.nodes.ndim, 1)
self.assertEqual(atts.edges.ndim, 1)
self.assertEqual(graphs.nodes.shape[0], atts.nodes.shape[0])
self.assertEqual(graphs.edges.shape[0], atts.edges.shape[0])
np.testing.assert_allclose(graphs.n_node, atts.n_node)
np.testing.assert_allclose(graphs.n_edge, atts.n_edge)
np.testing.assert_allclose(graphs.senders, atts.senders)
np.testing.assert_allclose(graphs.receivers, atts.receivers)
@parameterized.parameters([
'Random', 'CAM', 'GradCAM-last', 'GradCAM-all', 'GradInput',
'SmoothGrad(GradInput)', 'IG'
])
def test_attribute(self, method_name):
"""Check we can attribute."""
graphs, model, tensorizer = self._setup_graphs_model()
method = self._setup_technique(method_name, tensorizer)
atts = method.attribute(graphs, model)
self.assertAttribution(graphs, atts)
@parameterized.parameters(
['CAM', 'GradCAM-last', 'GradCAM-all', 'GradInput', 'IG'])
def test_attribute_independence(self, method_name):
"""Check that atts are the same batched and non-batched."""
graphs, model, tensorizer = self._setup_graphs_model()
method = self._setup_technique(method_name, tensorizer)
atts = method.attribute(graphs, model)
single_graphs = graph_utils.split_graphs_tuple(graphs)
for xi, actual in zip(single_graphs, atts):
expected = method.attribute(xi, model)
np.testing.assert_allclose(actual.nodes, expected[0].nodes, rtol=1e-2)
np.testing.assert_allclose(actual.edges, expected[0].edges, rtol=1e-2)
self.assertAttribution(xi, expected)
def test_ig_sanity_check(self):
"""Check that IG improves with more integration steps."""
graphs, model, tensorizer = self._setup_graphs_model()
ref_fn = techniques.make_reference_fn(*tensorizer.get_null_vectors())
method_25 = techniques.IntegratedGradients(25, ref_fn)
method_100 = techniques.IntegratedGradients(100, ref_fn)
error_25 = method_25.sanity_check(graphs, model)['ig_error'].mean()
error_100 = method_100.sanity_check(graphs, model)['ig_error'].mean()
self.assertLessEqual(error_100, error_25)
if __name__ == '__main__':
tf.config.experimental_run_functions_eagerly(True)
absltest.main()
| apache-2.0 | -3,539,298,706,221,075,500 | 39.88 | 76 | 0.715509 | false |
SauloAislan/ironic | ironic/common/keystone.py | 1 | 4019 | # coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Central place for handling Keystone authorization and service lookup."""
from keystoneauth1 import exceptions as kaexception
from keystoneauth1 import loading as kaloading
from oslo_log import log as logging
import six
from ironic.common import exception
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
def ks_exceptions(f):
"""Wraps keystoneclient functions and centralizes exception handling."""
@six.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except kaexception.EndpointNotFound:
service_type = kwargs.get('service_type', 'baremetal')
endpoint_type = kwargs.get('endpoint_type', 'internal')
raise exception.CatalogNotFound(
service_type=service_type, endpoint_type=endpoint_type)
except (kaexception.Unauthorized, kaexception.AuthorizationFailure):
raise exception.KeystoneUnauthorized()
except (kaexception.NoMatchingPlugin,
kaexception.MissingRequiredOptions) as e:
raise exception.ConfigInvalid(six.text_type(e))
except Exception as e:
LOG.exception('Keystone request failed: %(msg)s',
{'msg': six.text_type(e)})
raise exception.KeystoneFailure(six.text_type(e))
return wrapper
@ks_exceptions
def get_session(group, **session_kwargs):
"""Loads session object from options in a configuration file section.
The session_kwargs will be passed directly to keystoneauth1 Session
and will override the values loaded from config.
Consult keystoneauth1 docs for available options.
:param group: name of the config section to load session options from
"""
return kaloading.load_session_from_conf_options(
CONF, group, **session_kwargs)
@ks_exceptions
def get_auth(group, **auth_kwargs):
"""Loads auth plugin from options in a configuration file section.
The auth_kwargs will be passed directly to keystoneauth1 auth plugin
and will override the values loaded from config.
Note that the accepted kwargs will depend on auth plugin type as defined
by [group]auth_type option.
Consult keystoneauth1 docs for available auth plugins and their options.
:param group: name of the config section to load auth plugin options from
"""
try:
auth = kaloading.load_auth_from_conf_options(CONF, group,
**auth_kwargs)
except kaexception.MissingRequiredOptions:
LOG.error('Failed to load auth plugin from group %s', group)
raise
return auth
# NOTE(pas-ha) Used by neutronclient and resolving ironic API only
# FIXME(pas-ha) remove this while moving to kesytoneauth adapters
@ks_exceptions
def get_service_url(session, **kwargs):
"""Find endpoint for given service in keystone catalog.
    If 'interface' is provided, fetches the service URL of that interface.
Otherwise, first tries to fetch 'internal' endpoint,
and then the 'public' one.
:param session: keystoneauth Session object
:param kwargs: any other arguments accepted by Session.get_endpoint method
"""
if 'interface' in kwargs:
return session.get_endpoint(**kwargs)
try:
return session.get_endpoint(interface='internal', **kwargs)
except kaexception.EndpointNotFound:
return session.get_endpoint(interface='public', **kwargs)
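# Illustrative usage sketch (added for clarity, not part of the original
# module). The 'service_catalog' config group is an assumption; any group
# that defines keystoneauth auth/session options would work the same way:
#
#   auth = get_auth('service_catalog')
#   session = get_session('service_catalog', auth=auth)
#   url = get_service_url(session, service_type='baremetal')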
| apache-2.0 | 4,339,364,489,607,921,700 | 35.87156 | 78 | 0.700423 | false |
gnoack/ukechord | chordpro.py | 1 | 4308 | """Read ChordPro files and output them through a PDFWriter object"""
import re
import song
import uke
class ChordProError(Exception):
"""Error in a ChordPro input."""
pass
def _analyze_chordpro_textline(line):
"""Analyze the text and chords in a line of text.
Args:
line: The line of text, with chords in square brackets.
Returns:
A list of (chord, textchunk) tuples.
The chord is None for a leading piece of text without preceding chord.
Example:
Input: "This is [Dm]an example [C]line."
Output: [(None, "This is "), ("Dm", "an example "), ("C", "line.")]
"""
matches = list(re.finditer(r"\[([^\]]+)\]([^\[]*)", line))
if matches:
result = []
if matches[0].start(0):
result.append((None, line[:matches[0].start(0)]))
for match in matches:
result.append(match.groups())
return result
return [(None, line)]
def _chordpro_line(line):
"""Analyze a ChordPro line into a key value pair.
For commands of the form "{key:value}", those will be the key and value.
For empty lines, key is "$empty", and value is None.
For text lines, returns "$lyrics" as key
and a list of (chord, text) tuples as value
"""
line = line.strip()
if not line or line.startswith("#"):
return ("$empty", None)
if line.startswith("{") and line.endswith("}"):
key, unused_colon, value = line[1:-1].partition(":")
return (key, value)
else:
return ("$lyrics", _analyze_chordpro_textline(line))
def _parse_chord_definition(value):
# TODO: Is it required to define 'fingers' in each chord definition?
match = re.match(
r"\s+(?P<name>[A-Za-z0-9/+#]*)\s+"
r"frets\s+(?P<frets>[\d\s]+)"
r"fingers\s+(?P<fingers>[\d\s]+)$",
value)
# TODO: Implement finger positioning support
# TODO: Catch too high fret values
if not match:
raise ChordProError("Chord definition parsing failed", value)
frets = [int(fret) for fret in match.group('frets').split(' ') if fret]
if any(fret > uke.MAX_FRET for fret in frets):
    raise ChordProError("Frets beyond %d don't exist." % uke.MAX_FRET)
return match.group('name'), tuple(frets)
def _convert_lines_to_ast_nodes(lines, chords, end_of_section_markers=()):
result = []
for key, value in lines:
if key in end_of_section_markers:
break
elif key == "$empty":
pass # ignore
elif key in ("$lyrics", "comment"):
if key == "$lyrics":
first_verse_item = song.Line(value)
elif key == "comment":
first_verse_item = song.Comment(value)
else:
raise ChordProError("Should never happen. - Programming error")
# Text
if end_of_section_markers:
# If we're in a section, lines are fine.
result.append(first_verse_item)
else:
verse_lines = _convert_lines_to_ast_nodes(
lines, chords=chords,
end_of_section_markers=("$empty"))
result.append(song.Verse([first_verse_item] + verse_lines))
elif key in ("soc", "start-of-chorus", "start_of_chorus"):
if end_of_section_markers:
raise ChordProError("ChordPro: Nested choruses are not supported.")
result.append(song.Chorus(
_convert_lines_to_ast_nodes(
lines, chords=chords,
end_of_section_markers=("eoc", "end-of-chorus", "end_of_chorus"))))
elif key == "define":
name, frets = _parse_chord_definition(value)
chords[name] = frets
elif key in ("title", "subtitle"):
continue # Handled earlier.
elif key == "fontsize":
# TODO: How to handle font size?
pass # Should translate to pdf_writer.setFontsize(int(value))
elif key in ("eoc", "end-of-chorus", "end_of_chorus"):
# If not already part of breaking condition.
raise ChordProError(
"End-of-chorus ChordPro command without matching start.")
else:
      raise ChordProError("Unknown ChordPro command: %s" % key)
return result
def to_ast(infile):
lines = [_chordpro_line(line) for line in infile.readlines()]
keys_and_values = dict(lines)
title = keys_and_values.get("title", "").strip()
subtitle = keys_and_values.get("subtitle", "").strip()
chords = {}
children = _convert_lines_to_ast_nodes(iter(lines), chords=chords)
return song.Song(children, title=title, subtitle=subtitle, chords=chords)
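if __name__ == "__main__":  # pragma: no cover
  # Illustrative usage sketch added for clarity; not part of the original
  # module. The ChordPro snippet is made up, and io.StringIO with str
  # literals assumes Python 3.
  import io
  example = io.StringIO(
      "{title:Example Song}\n"
      "This is [C]only a [G7]test.\n")
  print(to_ast(example))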
| apache-2.0 | -5,033,884,073,311,483,000 | 31.885496 | 77 | 0.634401 | false |
remind101/stacker_blueprints | stacker_blueprints/policies.py | 1 | 6595 | from awacs.aws import (
Action,
Allow,
Policy,
Principal,
Statement,
)
from troposphere import (
Sub,
Join,
Region,
AccountId,
AWSHelperFn
)
from awacs import (
sts,
s3,
logs,
ec2,
dynamodb,
cloudwatch,
)
def make_simple_assume_statement(*principals):
return Statement(
Principal=Principal('Service', principals),
Effect=Allow,
Action=[sts.AssumeRole])
def make_simple_assume_policy(*principals):
return Policy(
Statement=[
make_simple_assume_statement(*principals)])
def dynamodb_arn(table_name):
return 'arn:aws:dynamodb:::table/{}'.format(table_name)
def dynamodb_arns(table_names):
return [dynamodb_arn(table_name) for table_name in table_names]
def s3_arn(bucket):
if isinstance(bucket, AWSHelperFn):
return Sub('arn:aws:s3:::${Bucket}', Bucket=bucket)
else:
return 'arn:aws:s3:::%s' % bucket
def s3_objects_arn(bucket, folder="*"):
if isinstance(bucket, AWSHelperFn):
return Sub('arn:aws:s3:::${Bucket}/%s' % folder, Bucket=bucket)
else:
return 'arn:aws:s3:::%s/%s' % (bucket, folder)
def read_only_s3_bucket_policy_statements(buckets, folder="*"):
""" Read only policy an s3 bucket. """
list_buckets = [s3_arn(b) for b in buckets]
object_buckets = [s3_objects_arn(b, folder) for b in buckets]
bucket_resources = list_buckets + object_buckets
return [
Statement(
Effect=Allow,
Resource=[s3_arn("*")],
Action=[s3.ListAllMyBuckets]
),
Statement(
Effect=Allow,
Resource=bucket_resources,
Action=[Action('s3', 'Get*'), Action('s3', 'List*')]
)
]
def read_only_s3_bucket_policy(buckets):
return Policy(Statement=read_only_s3_bucket_policy_statements(buckets))
def read_write_s3_bucket_policy_statements(buckets, folder="*"):
list_buckets = [s3_arn(b) for b in buckets]
object_buckets = [s3_objects_arn(b, folder) for b in buckets]
return [
Statement(
Effect="Allow",
Action=[
s3.GetBucketLocation,
s3.ListAllMyBuckets,
],
Resource=[s3_arn("*")]
),
Statement(
Effect=Allow,
Action=[
s3.ListBucket,
s3.GetBucketVersioning,
],
Resource=list_buckets,
),
Statement(
Effect=Allow,
Action=[
s3.GetObject,
s3.PutObject,
s3.PutObjectAcl,
s3.DeleteObject,
s3.GetObjectVersion,
s3.DeleteObjectVersion,
],
Resource=object_buckets,
),
]
def read_write_s3_bucket_policy(buckets):
return Policy(Statement=read_write_s3_bucket_policy_statements(buckets))
def static_website_bucket_policy(bucket):
"""
Attach this policy directly to an S3 bucket to make it a static website.
This policy grants read access to **all unauthenticated** users.
"""
return Policy(
Statement=[
Statement(
Effect=Allow,
Principal=Principal("*"),
Action=[s3.GetObject],
Resource=[s3_objects_arn(bucket)],
)
]
)
def log_stream_arn(log_group_name, log_stream_name):
return Join(
'',
[
"arn:aws:logs:", Region, ":", AccountId, ":log-group:",
log_group_name, ":log-stream:", log_stream_name
]
)
def write_to_cloudwatch_logs_stream_statements(log_group_name,
log_stream_name):
return [
Statement(
Effect=Allow,
Action=[logs.PutLogEvents],
Resource=[log_stream_arn(log_group_name, log_stream_name)]
)
]
def write_to_cloudwatch_logs_stream_policy(log_group_name, log_stream_name):
return Policy(
Statement=write_to_cloudwatch_logs_stream_statements(log_group_name,
log_stream_name)
)
def cloudwatch_logs_write_statements(log_group=None):
resources = ["arn:aws:logs:*:*:*"]
if log_group:
log_group_parts = ["arn:aws:logs:", Region, ":", AccountId,
":log-group:", log_group]
log_group_arn = Join("", log_group_parts)
log_stream_wild = Join("", log_group_parts + [":*"])
resources = [log_group_arn, log_stream_wild]
return [
Statement(
Effect=Allow,
Resource=resources,
Action=[
logs.CreateLogGroup,
logs.CreateLogStream,
logs.PutLogEvents
]
)
]
def lambda_basic_execution_statements(function_name):
log_group = Join("/", ["/aws/lambda", function_name])
return cloudwatch_logs_write_statements(log_group)
def lambda_basic_execution_policy(function_name):
return Policy(Statement=lambda_basic_execution_statements(function_name))
def lambda_vpc_execution_statements():
    """Allow Lambda to manipulate EC2 ENIs for VPC support."""
return [
Statement(
Effect=Allow,
Resource=['*'],
Action=[
ec2.CreateNetworkInterface,
ec2.DescribeNetworkInterfaces,
ec2.DeleteNetworkInterface,
]
)
]
def flowlogs_assumerole_policy():
return make_simple_assume_policy("vpc-flow-logs.amazonaws.com")
# reference: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-examples-application-autoscaling # noqa
def dynamodb_autoscaling_policy(tables):
"""Policy to allow AutoScaling a list of DynamoDB tables."""
return Policy(
Statement=[
Statement(
Effect=Allow,
Resource=dynamodb_arns(tables),
Action=[
dynamodb.DescribeTable,
dynamodb.UpdateTable,
]
),
Statement(
Effect=Allow,
Resource=['*'],
Action=[
cloudwatch.PutMetricAlarm,
cloudwatch.DescribeAlarms,
cloudwatch.GetMetricStatistics,
cloudwatch.SetAlarmState,
cloudwatch.DeleteAlarms,
]
),
]
)
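# Illustrative usage sketch (added for clarity, not part of the original
# module). The bucket, table and function names are made-up assumptions:
#
#   s3_policy = read_write_s3_bucket_policy(['my-data-bucket'])
#   ddb_policy = dynamodb_autoscaling_policy(['my-table'])
#   fn_policy = lambda_basic_execution_policy('my-function')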
| bsd-2-clause | -2,938,821,598,821,418,500 | 25.808943 | 167 | 0.549659 | false |
alexlee-gk/visual_dynamics | visual_dynamics/envs/quad_panda3d_env.py | 1 | 2967 | import numpy as np
import citysim3d.envs
from visual_dynamics.envs import Panda3dEnv
from visual_dynamics.spaces import Space, BoxSpace, TranslationAxisAngleSpace
from visual_dynamics.utils.config import ConfigObject
class SimpleQuadPanda3dEnv(citysim3d.envs.SimpleQuadPanda3dEnv, Panda3dEnv):
def _get_config(self):
config = super(SimpleQuadPanda3dEnv, self)._get_config()
car_action_space = self.car_action_space
if not isinstance(car_action_space, ConfigObject):
car_action_space = Space.create(car_action_space)
config.update({'action_space': self.action_space,
'sensor_names': self.sensor_names,
'camera_size': self.camera_size,
'camera_hfov': self.camera_hfov,
'offset': self.offset.tolist(),
'car_env_class': self.car_env_class,
'car_action_space': car_action_space,
'car_model_names': self.car_model_names})
return config
class Point3dSimpleQuadPanda3dEnv(SimpleQuadPanda3dEnv):
def __init__(self, action_space, **kwargs):
super(Point3dSimpleQuadPanda3dEnv, self).__init__(action_space, **kwargs)
self._observation_space.spaces['pos'] = BoxSpace(-np.inf, np.inf, shape=(3,))
def observe(self):
obs = super(Point3dSimpleQuadPanda3dEnv, self).observe()
obs['pos'] = np.array(self.car_node.getTransform(self.camera_node).getPos())
return obs
def main():
import os
import numpy as np
from panda3d.core import loadPrcFile
assert "CITYSIM3D_DIR" in os.environ
loadPrcFile(os.path.expandvars('${CITYSIM3D_DIR}/config.prc'))
action_space = TranslationAxisAngleSpace(np.array([-20, -10, -10, -1.5707963267948966]),
np.array([20, 10, 10, 1.5707963267948966]))
sensor_names = ['image', 'depth_image']
env = SimpleQuadPanda3dEnv(action_space, sensor_names)
import time
import cv2
start_time = time.time()
frames = 0
from visual_dynamics.policies.quad_target_policy import QuadTargetPolicy
pol = QuadTargetPolicy(env, (12, 18), (-np.pi / 2, np.pi / 2))
obs = env.reset()
pol.reset()
image, depth_image = obs
while True:
try:
env.render()
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow('Image window', image)
key = cv2.waitKey(1)
key &= 255
if key == 27 or key == ord('q'):
print("Pressed ESC or q, exiting")
break
quad_action = pol.act(obs)
obs, _, _, _ = env.step(quad_action)
image, depth_image = obs
frames += 1
except KeyboardInterrupt:
break
end_time = time.time()
print("average FPS: {}".format(frames / (end_time - start_time)))
if __name__ == "__main__":
main()
| mit | -5,554,164,767,170,576,000 | 34.321429 | 92 | 0.596562 | false |
osborne6/luminotes | view/Page_navigation.py | 1 | 1736 | from Tags import P, Span, A, Strong
class Page_navigation( P ):
def __init__( self, page_path, displayed_item_count, total_item_count, start, items_per_page, return_text = None ):
if start is None or items_per_page is None:
P.__init__( self )
return
if displayed_item_count == 1 and displayed_item_count < total_item_count:
if not return_text:
P.__init__( self )
return
P.__init__(
self,
Span(
A(
return_text,
href = "%s" % page_path,
),
),
)
return
if start == 0 and items_per_page >= total_item_count:
P.__init__( self )
return
P.__init__(
self,
( start > 0 ) and Span(
A(
u"previous",
href = self.href( page_path, max( start - items_per_page, 0 ), items_per_page ),
),
u" | ",
) or None,
[ Span(
( start == page_start ) and Strong( unicode( page_number + 1 ) ) or A(
Strong( unicode( page_number + 1 ) ),
href = self.href( page_path, page_start, items_per_page ),
),
) for ( page_number, page_start ) in enumerate( range( 0, total_item_count, items_per_page ) ) ],
( start + items_per_page < total_item_count ) and Span(
u" | ",
A(
u"next",
href = self.href( page_path, min( start + items_per_page, total_item_count - 1 ), items_per_page ),
),
) or None,
)
@staticmethod
def href( page_path, start, count ):
# if start is zero, leave off start and count parameters and just use the defaults
if start == 0:
return page_path
return u"%s?start=%d&count=%d" % ( page_path, start, count )
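# Illustrative usage sketch (added for clarity, not part of the original
# module). The path and counts are made-up assumptions; a view would embed
# the resulting node in its rendered page:
#
#   nav = Page_navigation( u"/notebooks/public", 10, 55, 0, 10 )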
| gpl-3.0 | -6,957,415,015,767,892,000 | 28.423729 | 117 | 0.522465 | false |
tambetm/gymexperiments | a2c_atari.py | 1 | 10250 | import argparse
import os
import multiprocessing
from multiprocessing import Process, Queue, Array
import pickle
import gym
from gym.spaces import Box, Discrete
from keras.models import Model
from keras.layers import Input, TimeDistributed, Convolution2D, Flatten, LSTM, Dense
from keras.objectives import categorical_crossentropy
from keras.optimizers import Adam
from keras.utils import np_utils
import keras.backend as K
import numpy as np
from atari_utils import RandomizedResetEnv, AtariRescale42x42Env
def create_env(env_id):
env = gym.make(env_id)
env = RandomizedResetEnv(env)
env = AtariRescale42x42Env(env)
return env
def create_model(env, batch_size, num_steps):
# network inputs are observations and advantages
h = x = Input(batch_shape=(batch_size, num_steps) + env.observation_space.shape, name="x")
A = Input(batch_shape=(batch_size, num_steps), name="A")
# convolutional layers
h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c1')(h)
h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c2')(h)
h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c3')(h)
h = TimeDistributed(Convolution2D(64, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c4')(h)
h = TimeDistributed(Flatten(), name="fl")(h)
# recurrent layer
h = LSTM(32, return_sequences=True, stateful=True, name="r1")(h)
# policy network
p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)
# baseline network
b = TimeDistributed(Dense(1), name="b")(h)
# inputs to the model are observation and advantages,
# outputs are action probabilities and baseline
model = Model(input=[x, A], output=[p, b])
# policy gradient loss and entropy bonus
def policy_gradient_loss(l_sampled, l_predicted):
return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
- 0.01 * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)
# baseline is optimized with MSE
model.compile(optimizer='adam', loss=[policy_gradient_loss, 'mse'])
return model
def predict(model, observation):
# create inputs for batch (and timestep) of size 1
x = np.array([[observation]])
A = np.zeros((1, 1)) # dummy advantage
# predict action probabilities (and baseline state value)
p, b = model.predict_on_batch([x, A])
# return action probabilities and baseline
return p[0, 0], b[0, 0, 0]
def discount(rewards, terminals, v, gamma):
# calculate discounted future rewards for this trajectory
returns = []
# start with the predicted value of the last state
R = v
for r, t in zip(reversed(rewards), reversed(terminals)):
# if it was terminal state then restart from 0
if t:
R = 0
R = r + R * gamma
returns.insert(0, R)
return returns
def runner(shared_buffer, fifo, num_timesteps, monitor, args):
proc_name = multiprocessing.current_process().name
print("Runner %s started" % proc_name)
# local environment for runner
env = create_env(args.env_id)
# start monitor to record statistics and videos
if monitor:
env.monitor.start(args.env_id)
# copy of model
model = create_model(env, batch_size=1, num_steps=1)
# record episode lengths and rewards for statistics
episode_rewards = []
episode_lengths = []
episode_reward = 0
episode_length = 0
observation = env.reset()
for i in range(num_timesteps // args.num_local_steps):
# copy weights from main network at the beginning of iteration
# the main network's weights are only read, never modified
# but we create our own model instance, because Keras is not thread-safe
model.set_weights(pickle.loads(shared_buffer.raw))
observations = []
actions = []
rewards = []
terminals = []
baselines = []
for t in range(args.num_local_steps):
if args.display:
env.render()
# predict action probabilities (and baseline state value)
p, b = predict(model, observation)
# sample action using those probabilities
p /= np.sum(p) # ensure p-s sum up to 1
action = np.random.choice(env.action_space.n, p=p)
# log data
observations.append(observation)
actions.append(action)
baselines.append(b)
# step environment
observation, reward, terminal, _ = env.step(int(action))
rewards.append(reward)
terminals.append(terminal)
episode_reward += reward
episode_length += 1
# reset if terminal state
if terminal:
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
episode_reward = 0
episode_length = 0
observation = env.reset()
# calculate discounted returns
if terminal:
# if the last was terminal state then start from 0
returns = discount(rewards, terminals, 0, 0.99)
else:
# otherwise calculate the value of the last state
_, v = predict(model, observation)
returns = discount(rewards, terminals, v, 0.99)
# convert to numpy arrays
observations = np.array(observations)
actions = np_utils.to_categorical(actions, env.action_space.n)
baselines = np.array(baselines)
returns = np.array(returns)
advantages = returns - baselines
# send observations, actions, rewards and returns. blocks if fifo is full.
fifo.put((observations, actions, returns, advantages, episode_rewards, episode_lengths))
episode_rewards = []
episode_lengths = []
if monitor:
env.monitor.close()
print("Runner %s finished" % proc_name)
def trainer(model, fifos, shared_buffer, args):
proc_name = multiprocessing.current_process().name
print("Trainer %s started" % proc_name)
episode_rewards = []
episode_lengths = []
timestep = 0
while len(multiprocessing.active_children()) > 0 and timestep < args.num_timesteps:
batch_observations = []
batch_actions = []
batch_returns = []
batch_advantages = []
# loop over fifos from all runners
for q, fifo in enumerate(fifos):
# wait for a new trajectory and statistics
observations, actions, returns, advantages, rewards, lengths = fifo.get()
# add to batch
batch_observations.append(observations)
batch_actions.append(actions)
batch_returns.append(returns)
batch_advantages.append(advantages)
# log statistics
episode_rewards += rewards
episode_lengths += lengths
timestep += len(observations)
# form training data from observations, actions and returns
x = np.array(batch_observations)
p = np.array(batch_actions)
R = np.array(batch_returns)[:, :, np.newaxis]
A = np.array(batch_advantages)
# anneal learning rate
model.optimizer.lr = max(0.001 * (args.num_timesteps - timestep) / args.num_timesteps, 0)
# train the model
total_loss, policy_loss, baseline_loss = model.train_on_batch([x, A], [p, R])
# share model parameters
shared_buffer.raw = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
if timestep % args.stats_interval == 0:
print("Step %d/%d: episodes %d, mean episode reward %.2f, mean episode length %.2f." %
(timestep, args.num_timesteps, len(episode_rewards), np.mean(episode_rewards), np.mean(episode_lengths)))
episode_rewards = []
episode_lengths = []
print("Trainer %s finished" % proc_name)
def run(args):
# create dummy environment to be able to create model
env = create_env(args.env_id)
assert isinstance(env.observation_space, Box)
assert isinstance(env.action_space, Discrete)
print("Observation space: " + str(env.observation_space))
print("Action space: " + str(env.action_space))
# create main model
model = create_model(env, batch_size=args.num_runners, num_steps=args.num_local_steps)
model.summary()
env.close()
# for better compatibility with Theano and Tensorflow
multiprocessing.set_start_method('spawn')
# create shared buffer for sharing weights
blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
shared_buffer = Array('c', len(blob))
shared_buffer.raw = blob
# force runner processes to use cpu, child processes inherit environment variables
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# create fifos and processes for all runners
fifos = []
for i in range(args.num_runners):
fifo = Queue(args.queue_length)
fifos.append(fifo)
process = Process(target=runner,
args=(shared_buffer, fifo, args.num_timesteps // args.num_runners, args.monitor and i == 0, args))
process.start()
# start trainer in main thread
trainer(model, fifos, shared_buffer, args)
print("All done")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parallelization
parser.add_argument('--num_runners', type=int, default=2)
parser.add_argument('--queue_length', type=int, default=2)
# how long
parser.add_argument('--num_timesteps', type=int, default=5000000)
parser.add_argument('--num_local_steps', type=int, default=20)
parser.add_argument('--stats_interval', type=int, default=10000)
# technical
parser.add_argument('--display', action='store_true', default=False)
parser.add_argument('--monitor', action='store_true', default=False)
# mandatory
parser.add_argument('env_id')
args = parser.parse_args()
run(args)
| mit | 6,621,470,067,187,979,000 | 34.590278 | 137 | 0.639805 | false |
SymbiFlow/symbiflow-arch-defs | utils/lib/parse_route.py | 1 | 1541 | """ Library for parsing route output from VPR route files. """
from collections import namedtuple
Node = namedtuple('Node', 'inode x_low y_low x_high y_high ptc')
def format_name(s):
""" Converts VPR parenthesized name to just name. """
assert s[0] == '('
assert s[-1] == ')'
return s[1:-1]
def format_coordinates(coord):
""" Parses coordinates from VPR route file in format of (x,y). """
coord = format_name(coord)
x, y = coord.split(',')
return int(x), int(y)
def find_net_sources(f):
""" Yields tuple of (net string, Node namedtuple) from file object.
File object should be formatted as VPR route output file.
"""
net = None
for e in f:
tokens = e.strip().split()
if not tokens:
continue
elif tokens[0][0] == '#':
continue
elif tokens[0] == 'Net':
net = format_name(tokens[2])
        elif e.strip() == "Used in local cluster only, reserved one CLB pin":
continue
else:
if net is not None:
inode = int(tokens[1])
assert tokens[2] == 'SOURCE'
x, y = format_coordinates(tokens[3])
if tokens[4] == 'to':
x2, y2 = format_coordinates(tokens[5])
offset = 2
else:
x2, y2 = x, y
offset = 0
ptc = int(tokens[5 + offset])
yield net, Node(inode, x, y, x2, y2, ptc)
net = None
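if __name__ == "__main__":
    # Illustrative usage sketch added for clarity; not part of the original
    # library. Prints the SOURCE node of every net in a VPR route file whose
    # path is passed on the command line (the argument is an assumption).
    import sys
    with open(sys.argv[1]) as route_file:
        for net, node in find_net_sources(route_file):
            print(net, node)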
| isc | 6,817,013,597,185,511,000 | 27.018182 | 77 | 0.506814 | false |
demisto/content | Packs/Silverfort/Integrations/Silverfort/Silverfort_test.py | 1 | 5886 | import pytest
from unittest.mock import patch
from Silverfort import get_user_entity_risk_command, get_resource_entity_risk_command,\
update_user_entity_risk_command, update_resource_entity_risk_command
API_KEY = "APIKEY"
@pytest.fixture(autouse=True)
def upn():
return '[email protected]'
@pytest.fixture(autouse=True)
def base_url():
return 'https://test.com'
@pytest.fixture(autouse=True)
def email():
return '[email protected]'
@pytest.fixture(autouse=True)
def domain():
return 'silverfort.io'
@pytest.fixture(autouse=True)
def api_key():
return 'APIKEY'
@pytest.fixture(autouse=True)
def risk():
return {'risk_name': 'activity_risk', 'severity': 'medium', 'valid_for': 1, 'description': 'Suspicious activity'}
@pytest.fixture(autouse=True)
def resource_name():
return 'AA--DC-1'
@pytest.fixture(autouse=True)
def bad_response():
return 'No valid response'
@pytest.fixture(autouse=True)
def valid_update_response():
return {"result": "updated successfully!"}
@pytest.fixture(autouse=True)
def valid_get_risk_response():
return {"risk": "Low", "reasons": ["Password never expires", "Suspicious activity"]}
@pytest.fixture(autouse=True)
def valid_get_upn_response(upn):
return {"user_principal_name": upn}
@pytest.fixture(autouse=True)
def sam_account():
return 'sfuser'
@pytest.fixture(autouse=True)
def client(base_url):
from Silverfort import Client
return Client(base_url=base_url, verify=False)
@pytest.fixture(autouse=True)
def risk_args(risk):
return {'risk_name': 'activity_risk', 'severity': 'medium', 'valid_for': 1, 'description': 'Suspicious activity'}
class TestSilverfort(object):
@patch('Silverfort.API_KEY', API_KEY)
def test_get_status(self, requests_mock, base_url, api_key, client):
from Silverfort import test_module
requests_mock.get(f'{base_url}/getBootStatus?apikey={api_key}', json="True")
output = test_module(client)
assert output == "ok"
@patch('Silverfort.API_KEY', API_KEY)
def test_get_upn_by_email(self, requests_mock, upn, base_url, valid_get_upn_response, api_key, client, email, domain):
requests_mock.get(f'{base_url}/getUPN?apikey={api_key}&email={email}&domain={domain}', json=valid_get_upn_response)
output = client.get_upn_by_email_or_sam_account_http_request(domain, email=email)
assert output == upn
@patch('Silverfort.API_KEY', API_KEY)
def test_get_upn_by_sam_account(self, requests_mock, upn, base_url, valid_get_upn_response, api_key, client, sam_account,
domain):
requests_mock.get(f'{base_url}/getUPN?apikey={api_key}&sam_account={sam_account}&domain={domain}',
json=valid_get_upn_response)
output = client.get_upn_by_email_or_sam_account_http_request(domain, sam_account=sam_account)
assert output == upn
@patch('Silverfort.API_KEY', API_KEY)
def test_get_user_entity_risk(self, requests_mock, upn, base_url, api_key, client, valid_get_risk_response):
args = {'upn': upn}
requests_mock.get(f'{base_url}/getEntityRisk?apikey={api_key}&user_principal_name={upn}',
json=valid_get_risk_response)
_, outputs, _ = get_user_entity_risk_command(client, args)
outputs = outputs['Silverfort.UserRisk(val.UPN && val.UPN == obj.UPN)']
assert outputs["UPN"] == upn
assert outputs["Risk"] == valid_get_risk_response["risk"]
assert outputs["Reasons"] == valid_get_risk_response["reasons"]
@patch('Silverfort.API_KEY', API_KEY)
def test_get_resource_entity_risk(self, requests_mock, base_url, api_key, client, valid_get_risk_response, resource_name,
domain):
args = {'resource_name': resource_name, 'domain_name': domain}
requests_mock.get(f'{base_url}/getEntityRisk?apikey={api_key}&resource_name={resource_name}'
f'&domain_name={domain}', json=valid_get_risk_response)
_, outputs, _ = get_resource_entity_risk_command(client, args)
outputs = outputs['Silverfort.ResourceRisk(val.ResourceName && val.ResourceName == obj.ResourceName)']
assert outputs["ResourceName"] == resource_name
assert outputs["Risk"] == valid_get_risk_response["risk"]
assert outputs["Reasons"] == valid_get_risk_response["reasons"]
@patch('Silverfort.API_KEY', API_KEY)
def test_update_user_entity_risk(self, requests_mock, upn, base_url, api_key, client, valid_update_response, bad_response,
risk_args):
args = risk_args
args['upn'] = upn
requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=valid_update_response)
assert update_user_entity_risk_command(client, args) == "updated successfully!"
requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=bad_response)
assert update_user_entity_risk_command(client, args) == "Couldn't update the user entity's risk"
@patch('Silverfort.API_KEY', API_KEY)
def test_update_resource_entity_risk_successfully(self, requests_mock, base_url, api_key, client, valid_update_response,
bad_response, risk_args, resource_name, domain):
args = risk_args
args['resource_name'] = resource_name
args['domain_name'] = domain
requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=valid_update_response)
assert update_resource_entity_risk_command(client, args) == 'updated successfully!'
requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=bad_response)
assert update_resource_entity_risk_command(client, args) == "Couldn't update the resource entity's risk"
| mit | -7,427,504,390,809,783,000 | 36.730769 | 126 | 0.659531 | false |
NaohiroTamura/python-ironicclient | ironicclient/v1/chassis.py | 1 | 6569 | # -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.common import base
from ironicclient.common.i18n import _
from ironicclient.common import utils
from ironicclient import exc
class Chassis(base.Resource):
def __repr__(self):
return "<Chassis %s>" % self._info
class ChassisManager(base.CreateManager):
resource_class = Chassis
_resource_name = 'chassis'
_creation_attributes = ['description', 'extra', 'uuid']
def list(self, marker=None, limit=None, sort_key=None,
sort_dir=None, detail=False, fields=None):
"""Retrieve a list of chassis.
:param marker: Optional, the UUID of a chassis, eg the last
chassis from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of chassis to return.
2) limit == 0, return the entire list of chassis.
3) limit param is NOT specified (None), the number of items
returned respect the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:param sort_key: Optional, field used for sorting.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:param detail: Optional, boolean whether to return detailed information
about chassis.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned. Can not be used
when 'detail' is set.
:returns: A list of chassis.
"""
if limit is not None:
limit = int(limit)
if detail and fields:
raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
"with 'detail' set"))
filters = utils.common_filters(marker, limit, sort_key, sort_dir,
fields)
path = ''
if detail:
path += 'detail'
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), "chassis")
else:
return self._list_pagination(self._path(path), "chassis",
limit=limit)
def list_nodes(self, chassis_id, marker=None, limit=None,
sort_key=None, sort_dir=None, detail=False, fields=None,
associated=None, maintenance=None, provision_state=None):
"""List all the nodes for a given chassis.
:param chassis_id: The UUID of the chassis.
:param marker: Optional, the UUID of a node, eg the last
node from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of nodes to return.
2) limit == 0, return the entire list of nodes.
3) limit param is NOT specified (None), the number of items
returned respect the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:param sort_key: Optional, field used for sorting.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:param detail: Optional, boolean whether to return detailed information
about nodes.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned. Can not be used
when 'detail' is set.
:param associated: Optional. Either a Boolean or a string
representation of a Boolean that indicates whether
to return a list of associated (True or "True") or
unassociated (False or "False") nodes.
:param maintenance: Optional. Either a Boolean or a string
representation of a Boolean that indicates whether
to return nodes in maintenance mode (True or
"True"), or not in maintenance mode (False or
"False").
:param provision_state: Optional. String value to get only nodes in
that provision state.
:returns: A list of nodes.
"""
if limit is not None:
limit = int(limit)
if detail and fields:
raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
"with 'detail' set"))
filters = utils.common_filters(marker, limit, sort_key, sort_dir,
fields)
if associated is not None:
filters.append('associated=%s' % associated)
if maintenance is not None:
filters.append('maintenance=%s' % maintenance)
if provision_state is not None:
filters.append('provision_state=%s' % provision_state)
path = "%s/nodes" % chassis_id
if detail:
path += '/detail'
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), "nodes")
else:
return self._list_pagination(self._path(path), "nodes",
limit=limit)
def get(self, chassis_id, fields=None):
return self._get(resource_id=chassis_id, fields=fields)
def delete(self, chassis_id):
return self._delete(resource_id=chassis_id)
def update(self, chassis_id, patch):
return self._update(resource_id=chassis_id, patch=patch)
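# Illustrative usage sketch (not part of this module): `client` is assumed to be an
# ironicclient v1 client, e.g. obtained via ironicclient.client.get_client(...), and
# `chassis_uuid` is a placeholder value.
#   chassis_list = client.chassis.list(limit=0)          # fetch every chassis
#   nodes = client.chassis.list_nodes(chassis_uuid, associated=True, detail=True)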
| apache-2.0 | 1,744,163,797,057,530,600 | 37.635294 | 79 | 0.564708 | false |
1905410/Misago | misago/threads/views/admin/attachments.py | 1 | 3500 | from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from misago.admin.views import generic
from ...forms import SearchAttachmentsForm
from ...models import Attachment, Post
class AttachmentAdmin(generic.AdminBaseMixin):
root_link = 'misago:admin:system:attachments:index'
Model = Attachment
templates_dir = 'misago/admin/attachments'
message_404 = _("Requested attachment could not be found.")
def get_queryset(self):
qs = super(AttachmentAdmin, self).get_queryset()
return qs.select_related('filetype', 'uploader', 'post', 'post__thread', 'post__category')
class AttachmentsList(AttachmentAdmin, generic.ListView):
items_per_page = 20
ordering = (
('-id', _("From newest")),
('id', _("From oldest")),
('filename', _("A to z")),
('-filename', _("Z to a")),
('size', _("Smallest files")),
('-size', _("Largest files")),
)
selection_label = _('With attachments: 0')
empty_selection_label = _('Select attachments')
mass_actions = [
{
'action': 'delete',
'name': _("Delete attachments"),
'icon': 'fa fa-times-circle',
'confirmation': _("Are you sure you want to delete selected attachments?"),
'is_atomic': False
}
]
def get_search_form(self, request):
return SearchAttachmentsForm
def action_delete(self, request, attachments):
deleted_attachments = []
desynced_posts = []
for attachment in attachments:
if attachment.post:
deleted_attachments.append(attachment.pk)
desynced_posts.append(attachment.post_id)
if desynced_posts:
with transaction.atomic():
for post in Post.objects.select_for_update().filter(id__in=desynced_posts):
self.delete_from_cache(post, deleted_attachments)
for attachment in attachments:
attachment.delete()
message = _("Selected attachments have been deleted.")
messages.success(request, message)
def delete_from_cache(self, post, attachments):
if not post.attachments_cache:
return # admin action may be taken due to desynced state
clean_cache = []
for a in post.attachments_cache:
if a['id'] not in attachments:
clean_cache.append(a)
post.attachments_cache = clean_cache or None
post.save(update_fields=['attachments_cache'])
class DeleteAttachment(AttachmentAdmin, generic.ButtonView):
def button_action(self, request, target):
if target.post:
self.delete_from_cache(target)
target.delete()
message = _('Attachment "%(filename)s" has been deleted.')
messages.success(request, message % {'filename': target.filename})
def delete_from_cache(self, attachment):
if not attachment.post.attachments_cache:
return # admin action may be taken due to desynced state
clean_cache = []
for a in attachment.post.attachments_cache:
if a['id'] != attachment.id:
clean_cache.append(a)
attachment.post.attachments_cache = clean_cache or None
attachment.post.save(update_fields=['attachments_cache'])
| gpl-2.0 | -8,330,892,099,958,133,000 | 33.653465 | 98 | 0.626857 | false |
pombredanne/blivet-1 | tests/devicelibs_test/edd_test.py | 1 | 9403 | import mock
class EddTestCase(mock.TestCase):
def setUp(self):
self.setupModules(
['_isys', 'logging', 'pyanaconda.anaconda_log', 'block'])
def tearDown(self):
self.tearDownModules()
def test_biosdev_to_edd_dir(self):
from blivet.devicelibs import edd
path = edd.biosdev_to_edd_dir(138)
self.assertEqual("/sys/firmware/edd/int13_dev8a", path)
def test_collect_edd_data(self):
from blivet.devicelibs import edd
# test with vda, vdb
fs = EddTestFS(self, edd).vda_vdb()
edd_dict = edd.collect_edd_data()
self.assertEqual(len(edd_dict), 2)
self.assertEqual(edd_dict[0x80].type, "SCSI")
self.assertEqual(edd_dict[0x80].scsi_id, 0)
self.assertEqual(edd_dict[0x80].scsi_lun, 0)
self.assertEqual(edd_dict[0x80].pci_dev, "00:05.0")
self.assertEqual(edd_dict[0x80].channel, 0)
self.assertEqual(edd_dict[0x80].sectors, 16777216)
self.assertEqual(edd_dict[0x81].pci_dev, "00:06.0")
# test with sda, vda
fs = EddTestFS(self, edd).sda_vda()
edd_dict = edd.collect_edd_data()
self.assertEqual(len(edd_dict), 2)
self.assertEqual(edd_dict[0x80].type, "ATA")
self.assertEqual(edd_dict[0x80].scsi_id, None)
self.assertEqual(edd_dict[0x80].scsi_lun, None)
self.assertEqual(edd_dict[0x80].pci_dev, "00:01.1")
self.assertEqual(edd_dict[0x80].channel, 0)
self.assertEqual(edd_dict[0x80].sectors, 2097152)
self.assertEqual(edd_dict[0x80].ata_device, 0)
self.assertEqual(edd_dict[0x80].mbr_signature, "0x000ccb01")
def test_collect_edd_data_cciss(self):
from blivet.devicelibs import edd
fs = EddTestFS(self, edd).sda_cciss()
edd_dict = edd.collect_edd_data()
self.assertEqual(edd_dict[0x80].pci_dev, None)
self.assertEqual(edd_dict[0x80].channel, None)
def test_edd_entry_str(self):
from blivet.devicelibs import edd
fs = EddTestFS(self, edd).sda_vda()
edd_dict = edd.collect_edd_data()
expected_output = """\ttype: ATA, ata_device: 0
\tchannel: 0, mbr_signature: 0x000ccb01
\tpci_dev: 00:01.1, scsi_id: None
\tscsi_lun: None, sectors: 2097152"""
self.assertEqual(str(edd_dict[0x80]), expected_output)
def test_matcher_device_path(self):
from blivet.devicelibs import edd
fs = EddTestFS(self, edd).sda_vda()
edd_dict = edd.collect_edd_data()
analyzer = edd.EddMatcher(edd_dict[0x80])
path = analyzer.devname_from_pci_dev()
self.assertEqual(path, "sda")
analyzer = edd.EddMatcher(edd_dict[0x81])
path = analyzer.devname_from_pci_dev()
self.assertEqual(path, "vda")
def test_bad_device_path(self):
from blivet.devicelibs import edd
fs = EddTestFS(self, edd).sda_vda_no_pcidev()
edd_dict = edd.collect_edd_data()
analyzer = edd.EddMatcher(edd_dict[0x80])
path = analyzer.devname_from_pci_dev()
self.assertEqual(path, None)
def test_bad_host_bus(self):
from blivet.devicelibs import edd
fs = EddTestFS(self, edd).sda_vda_no_host_bus()
edd_dict = edd.collect_edd_data()
        # 0x80 entry is busted so fail without an exception
analyzer = edd.EddMatcher(edd_dict[0x80])
devname = analyzer.devname_from_pci_dev()
self.assertEqual(devname, None)
# but still succeed on 0x81
analyzer = edd.EddMatcher(edd_dict[0x81])
devname = analyzer.devname_from_pci_dev()
self.assertEqual(devname, "vda")
def test_get_edd_dict_1(self):
""" Test get_edd_dict()'s pci_dev matching. """
from blivet.devicelibs import edd
fs = EddTestFS(self, edd).sda_vda()
self.assertEqual(edd.get_edd_dict([]),
{'sda' : 0x80,
'vda' : 0x81})
def test_get_edd_dict_2(self):
""" Test get_edd_dict()'s pci_dev matching. """
from blivet.devicelibs import edd
edd.collect_mbrs = mock.Mock(return_value = {
'sda' : '0x000ccb01',
'vda' : '0x0006aef1'})
fs = EddTestFS(self, edd).sda_vda_missing_details()
self.assertEqual(edd.get_edd_dict([]),
{'sda' : 0x80,
'vda' : 0x81})
def test_get_edd_dict_3(self):
""" Test scenario when the 0x80 and 0x81 edd directories contain the
same data and give no way to distinguish among the two devices.
"""
from blivet.devicelibs import edd
edd.log = mock.Mock()
edd.collect_mbrs = mock.Mock(return_value={'sda' : '0x000ccb01',
'vda' : '0x0006aef1'})
fs = EddTestFS(self, edd).sda_sdb_same()
self.assertEqual(edd.get_edd_dict([]), {})
self.assertIn((('edd: both edd entries 0x80 and 0x81 seem to map to sda',), {}),
edd.log.info.call_args_list)
class EddTestFS(object):
def __init__(self, test_case, target_module):
self.fs = mock.DiskIO()
test_case.take_over_io(self.fs, target_module)
def sda_vda_missing_details(self):
self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01\n"
self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev81/mbr_signature"] = "0x0006aef1\n"
def sda_vda(self):
self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:01.1 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/interface"] = "ATA device: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01\n"
self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "2097152\n"
self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev81/host_bus"] = "PCI 00:05.0 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev81/interface"] = "SCSI id: 0 lun: 0\n"
self.fs["/sys/firmware/edd/int13_dev81/mbr_signature"] = "0x0006aef1\n"
self.fs["/sys/firmware/edd/int13_dev81/sectors"] = "16777216\n"
self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block"] = self.fs.Dir()
self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block/sda"] = self.fs.Dir()
self.fs["/sys/devices/pci0000:00/0000:00:05.0/virtio2/block"] = self.fs.Dir()
self.fs["/sys/devices/pci0000:00/0000:00:05.0/virtio2/block/vda"] = self.fs.Dir()
return self.fs
def sda_vda_no_pcidev(self):
self.sda_vda()
entries = [e for e in self.fs.fs if e.startswith("/sys/devices/pci")]
map(self.fs.os_remove, entries)
return self.fs
def sda_vda_no_host_bus(self):
self.sda_vda()
self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:01.1 channel: \n"
self.fs.os_remove("/sys/firmware/edd/int13_dev80/mbr_signature")
self.fs.os_remove("/sys/firmware/edd/int13_dev81/mbr_signature")
def sda_cciss(self):
self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCIX 05:00.0 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/interface"] = "RAID identity_tag: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01\n"
self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "2097152\n"
return self.fs
def vda_vdb(self):
self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:05.0 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/interface"] = "SCSI id: 0 lun: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "16777216\n"
self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev81/host_bus"] = "PCI 00:06.0 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev81/interface"] = "SCSI id: 0 lun: 0\n"
self.fs["/sys/firmware/edd/int13_dev81/sectors"] = "4194304\n"
return self.fs
def sda_sdb_same(self):
self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:01.1 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/interface"] = "ATA device: 0\n"
self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01"
self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "2097152\n"
self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir()
self.fs["/sys/firmware/edd/int13_dev81/host_bus"] = "PCI 00:01.1 channel: 0\n"
self.fs["/sys/firmware/edd/int13_dev81/interface"] = "ATA device: 0\n"
self.fs["/sys/firmware/edd/int13_dev81/mbr_signature"] = "0x0006aef1"
self.fs["/sys/firmware/edd/int13_dev81/sectors"] = "2097152\n"
self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block"] = self.fs.Dir()
self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block/sda"] = self.fs.Dir()
| gpl-2.0 | 2,521,215,230,570,225,700 | 43.353774 | 107 | 0.605764 | false |
googleapis/python-pubsublite | google/cloud/pubsublite_v1/services/cursor_service/transports/__init__.py | 1 | 1185 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import CursorServiceTransport
from .grpc import CursorServiceGrpcTransport
from .grpc_asyncio import CursorServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[CursorServiceTransport]]
_transport_registry["grpc"] = CursorServiceGrpcTransport
_transport_registry["grpc_asyncio"] = CursorServiceGrpcAsyncIOTransport
__all__ = (
"CursorServiceTransport",
"CursorServiceGrpcTransport",
"CursorServiceGrpcAsyncIOTransport",
)
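# Illustrative lookup sketch: a transport class is selected by name from the registry,
# e.g. _transport_registry["grpc_asyncio"] yields CursorServiceGrpcAsyncIOTransport.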
| apache-2.0 | 220,261,319,148,648,960 | 34.909091 | 84 | 0.775527 | false |
great-expectations/great_expectations | tests/dataset/test_sparkdfdataset.py | 1 | 14191 | import importlib.util
import json
from unittest import mock
import pandas as pd
import pytest
from great_expectations.dataset.sparkdf_dataset import SparkDFDataset
from great_expectations.util import is_library_loadable
def test_sparkdfdataset_persist(spark_session):
df = pd.DataFrame({"a": [1, 2, 3]})
sdf = spark_session.createDataFrame(df)
sdf.persist = mock.MagicMock()
_ = SparkDFDataset(sdf, persist=True)
sdf.persist.assert_called_once()
sdf = spark_session.createDataFrame(df)
sdf.persist = mock.MagicMock()
_ = SparkDFDataset(sdf, persist=False)
sdf.persist.assert_not_called()
sdf = spark_session.createDataFrame(df)
sdf.persist = mock.MagicMock()
_ = SparkDFDataset(sdf)
sdf.persist.assert_called_once()
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
@pytest.fixture
def test_dataframe(spark_session):
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
schema = StructType(
[
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField(
"address",
StructType(
[
StructField("street", StringType(), True),
StructField("city", StringType(), True),
StructField("house_number", IntegerType(), True),
]
),
False,
),
StructField("name_duplicate", StringType(), True),
StructField("non.nested", StringType(), True),
StructField("name_with_duplicates", StringType(), True),
StructField("age_with_duplicates", IntegerType(), True),
StructField(
"address_with_duplicates",
StructType(
[
StructField("street", StringType(), True),
StructField("city", StringType(), True),
StructField("house_number", IntegerType(), True),
]
),
False,
),
]
)
rows = [
(
"Alice",
1,
("Street 1", "Alabama", 10),
"Alice",
"a",
"Alice",
1,
("Street 1", "Alabama", 12),
),
(
"Bob",
2,
("Street 2", "Brooklyn", 11),
"Bob",
"b",
"Bob",
2,
("Street 1", "Brooklyn", 12),
),
(
"Charlie",
3,
("Street 3", "Alabama", 12),
"Charlie",
"c",
"Charlie",
3,
("Street 1", "Alabama", 12),
),
(
"Dan",
4,
("Street 4", "Boston", 12),
"Dan",
"d",
"Charlie",
3,
("Street 1", "Boston", 12),
),
]
rdd = spark_session.sparkContext.parallelize(rows)
df = spark_session.createDataFrame(rdd, schema)
return SparkDFDataset(df, persist=True)
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_values_to_be_of_type(spark_session, test_dataframe):
"""
data asset expectation
"""
from pyspark.sql.utils import AnalysisException
assert test_dataframe.expect_column_values_to_be_of_type(
"address.street", "StringType"
).success
assert test_dataframe.expect_column_values_to_be_of_type(
"`non.nested`", "StringType"
).success
assert test_dataframe.expect_column_values_to_be_of_type(
"name", "StringType"
).success
with pytest.raises(AnalysisException):
test_dataframe.expect_column_values_to_be_of_type("non.nested", "StringType")
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_values_to_be_in_type_list(spark_session, test_dataframe):
"""
data asset expectation
"""
from pyspark.sql.utils import AnalysisException
assert test_dataframe.expect_column_values_to_be_in_type_list(
"address.street", ["StringType", "IntegerType"]
).success
assert test_dataframe.expect_column_values_to_be_in_type_list(
"`non.nested`", ["StringType", "IntegerType"]
).success
assert test_dataframe.expect_column_values_to_be_in_type_list(
"name", ["StringType", "IntegerType"]
).success
with pytest.raises(AnalysisException):
test_dataframe.expect_column_values_to_be_of_type("non.nested", "StringType")
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_pair_values_to_be_equal(spark_session, test_dataframe):
"""
column_pair_map_expectation
"""
from pyspark.sql.utils import AnalysisException
assert test_dataframe.expect_column_pair_values_to_be_equal(
"name", "name_duplicate"
).success
assert not test_dataframe.expect_column_pair_values_to_be_equal(
"name", "address.street"
).success
assert not test_dataframe.expect_column_pair_values_to_be_equal(
"name", "`non.nested`"
).success
# Expectation should fail when no `` surround a non-nested column with dot notation
with pytest.raises(AnalysisException):
test_dataframe.expect_column_pair_values_to_be_equal("name", "non.nested")
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_pair_values_A_to_be_greater_than_B(
spark_session, test_dataframe
):
"""
column_pair_map_expectation
"""
assert test_dataframe.expect_column_pair_values_A_to_be_greater_than_B(
"address.house_number", "age"
).success
assert test_dataframe.expect_column_pair_values_A_to_be_greater_than_B(
"age", "age", or_equal=True
).success
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_select_column_values_to_be_unique_within_record(
spark_session, test_dataframe
):
"""
multicolumn_map_expectation
"""
from pyspark.sql.utils import AnalysisException
assert test_dataframe.expect_select_column_values_to_be_unique_within_record(
["name", "age"]
).success
assert test_dataframe.expect_select_column_values_to_be_unique_within_record(
["address.street", "name"]
).success
assert test_dataframe.expect_select_column_values_to_be_unique_within_record(
["address.street", "`non.nested`"]
).success
# Expectation should fail when no `` surround a non-nested column with dot notation
with pytest.raises(AnalysisException):
test_dataframe.expect_select_column_values_to_be_unique_within_record(
["address.street", "non.nested"]
)
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_compound_columns_to_be_unique(spark_session, test_dataframe):
"""
multicolumn_map_expectation
"""
from pyspark.sql.utils import AnalysisException
# Positive tests
assert test_dataframe.expect_compound_columns_to_be_unique(["name", "age"]).success
assert test_dataframe.expect_compound_columns_to_be_unique(
["address.street", "name"]
).success
assert test_dataframe.expect_compound_columns_to_be_unique(
["address.street", "address.city"]
).success
assert test_dataframe.expect_compound_columns_to_be_unique(
["name_with_duplicates", "age_with_duplicates", "name"]
).success
assert test_dataframe.expect_compound_columns_to_be_unique(
["address.street", "`non.nested`"]
).success
assert test_dataframe.expect_compound_columns_to_be_unique(
["name", "name_with_duplicates"]
).success
assert test_dataframe.expect_compound_columns_to_be_unique(
[
"name",
"name_with_duplicates",
"address_with_duplicates.street",
"address_with_duplicates.city",
"address_with_duplicates.house_number",
]
).success
# Negative tests
assert not test_dataframe.expect_compound_columns_to_be_unique(
["address_with_duplicates.city", "address_with_duplicates.house_number"]
).success
assert not test_dataframe.expect_compound_columns_to_be_unique(
["name_with_duplicates"]
).success
assert not test_dataframe.expect_compound_columns_to_be_unique(
["name_with_duplicates", "address_with_duplicates.street"]
).success
assert not test_dataframe.expect_compound_columns_to_be_unique(
[
"name_with_duplicates",
"address_with_duplicates.street",
"address_with_duplicates.house_number",
]
).success
# Expectation should fail when no `` surround a non-nested column with dot notation
with pytest.raises(AnalysisException):
test_dataframe.expect_compound_columns_to_be_unique(
["address.street", "non.nested"]
)
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_values_to_be_unique(spark_session, test_dataframe):
"""
column_map_expectation
"""
from pyspark.sql.utils import AnalysisException
assert test_dataframe.expect_column_values_to_be_unique("name").success
assert not test_dataframe.expect_column_values_to_be_unique("address.city").success
assert test_dataframe.expect_column_values_to_be_unique("`non.nested`").success
# Expectation should fail when no `` surround a non-nested column with dot notation
with pytest.raises(AnalysisException):
test_dataframe.expect_column_values_to_be_unique("non.nested")
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_value_lengths_to_be_between(spark_session, test_dataframe):
"""
column_map_expectation
"""
assert test_dataframe.expect_column_value_lengths_to_be_between(
"name", 3, 7
).success
assert test_dataframe.expect_column_value_lengths_to_be_between(
"address.street", 1, 10
).success
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_value_lengths_to_equal(spark_session, test_dataframe):
"""
column_map_expectation
"""
assert test_dataframe.expect_column_value_lengths_to_equal("age", 1).success
assert test_dataframe.expect_column_value_lengths_to_equal(
"address.street", 8
).success
@pytest.mark.skipif(
not is_library_loadable(library_name="pyspark"),
reason="pyspark must be installed",
)
def test_expect_column_values_to_be_json_parseable(spark_session):
d1 = json.dumps({"i": [1, 2, 3], "j": 35, "k": {"x": "five", "y": 5, "z": "101"}})
d2 = json.dumps({"i": 1, "j": 2, "k": [3, 4, 5]})
d3 = json.dumps({"i": "a", "j": "b", "k": "c"})
d4 = json.dumps(
{"i": [4, 5], "j": [6, 7], "k": [8, 9], "l": {4: "x", 5: "y", 6: "z"}}
)
inner = {
"json_col": [d1, d2, d3, d4],
"not_json": [4, 5, 6, 7],
"py_dict": [
{"a": 1, "out": 1},
{"b": 2, "out": 4},
{"c": 3, "out": 9},
{"d": 4, "out": 16},
],
"most": [d1, d2, d3, "d4"],
}
data_reshaped = list(zip(*[v for _, v in inner.items()]))
df = spark_session.createDataFrame(
data_reshaped, ["json_col", "not_json", "py_dict", "most"]
)
D = SparkDFDataset(df)
D.set_default_expectation_argument("result_format", "COMPLETE")
T = [
{
"in": {"column": "json_col"},
"out": {
"success": True,
"unexpected_list": [],
},
},
{
"in": {"column": "not_json"},
"out": {
"success": False,
"unexpected_list": [4, 5, 6, 7],
},
},
{
"in": {"column": "py_dict"},
"out": {
"success": False,
"unexpected_list": [
{"a": 1, "out": 1},
{"b": 2, "out": 4},
{"c": 3, "out": 9},
{"d": 4, "out": 16},
],
},
},
{
"in": {"column": "most"},
"out": {
"success": False,
"unexpected_list": ["d4"],
},
},
{
"in": {"column": "most", "mostly": 0.75},
"out": {
"success": True,
"unexpected_index_list": [3],
"unexpected_list": ["d4"],
},
},
]
for t in T:
out = D.expect_column_values_to_be_json_parseable(**t["in"])
assert t["out"]["success"] == out.success
assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
| apache-2.0 | 7,190,094,410,767,394,000 | 30.676339 | 87 | 0.582623 | false |
Marcelpv96/SITWprac2017 | sportsBetting/migrations/0018_auto_20170515_1009.py | 1 | 1050 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-15 10:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sportsBetting', '0017_auto_20170510_1614'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='api_id',
),
migrations.AddField(
model_name='event',
name='id',
field=models.AutoField(auto_created=True, default=1, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AddField(
model_name='team',
name='created_by',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| gpl-3.0 | 7,969,951,120,777,904,000 | 29.882353 | 121 | 0.612381 | false |
leviroth/praw | praw/reddit.py | 1 | 20843 | """Provide the Reddit class."""
import configparser
import os
from itertools import islice
try:
from update_checker import update_check
UPDATE_CHECKER_MISSING = False
except ImportError: # pragma: no cover
UPDATE_CHECKER_MISSING = True
from prawcore import (
Authorizer,
DeviceIDAuthorizer,
ReadOnlyAuthorizer,
Redirect,
Requestor,
ScriptAuthorizer,
TrustedAuthenticator,
UntrustedAuthenticator,
session,
)
from . import models
from .config import Config
from .const import __version__, API_PATH, USER_AGENT_FORMAT
from .exceptions import ClientException
from .objector import Objector
class Reddit(object):
"""The Reddit class provides convenient access to reddit's API.
Instances of this class are the gateway to interacting with Reddit's API
through PRAW. The canonical way to obtain an instance of this class is via:
.. code-block:: python
import praw
reddit = praw.Reddit(client_id='CLIENT_ID',
client_secret="CLIENT_SECRET", password='PASSWORD',
user_agent='USERAGENT', username='USERNAME')
"""
update_checked = False
@property
def _next_unique(self):
value = self._unique_counter
self._unique_counter += 1
return value
@property
def read_only(self):
"""Return True when using the ReadOnlyAuthorizer."""
return self._core == self._read_only_core
@read_only.setter
def read_only(self, value):
"""Set or unset the use of the ReadOnlyAuthorizer.
Raise :class:`ClientException` when attempting to unset ``read_only``
and only the ReadOnlyAuthorizer is available.
"""
if value:
self._core = self._read_only_core
elif self._authorized_core is None:
raise ClientException(
"read_only cannot be unset as only the "
"ReadOnlyAuthorizer is available."
)
else:
self._core = self._authorized_core
def __enter__(self):
"""Handle the context manager open."""
return self
def __exit__(self, *_args):
"""Handle the context manager close."""
def __init__(
self,
site_name=None,
requestor_class=None,
requestor_kwargs=None,
**config_settings
): # noqa: D207, D301
"""Initialize a Reddit instance.
:param site_name: The name of a section in your ``praw.ini`` file from
which to load settings from. This parameter, in tandem with an
appropriately configured ``praw.ini``, file is useful if you wish
to easily save credentials for different applications, or
communicate with other servers running reddit. If ``site_name`` is
``None``, then the site name will be looked for in the environment
variable praw_site. If it is not found there, the DEFAULT site will
be used.
:param requestor_class: A class that will be used to create a
requestor. If not set, use ``prawcore.Requestor`` (default: None).
:param requestor_kwargs: Dictionary with additional keyword arguments
used to initialize the requestor (default: None).
Additional keyword arguments will be used to initialize the
:class:`.Config` object. This can be used to specify configuration
settings during instantiation of the :class:`.Reddit` instance. For
more details please see :ref:`configuration`.
Required settings are:
* client_id
* client_secret (for installed applications set this value to ``None``)
* user_agent
The ``requestor_class`` and ``requestor_kwargs`` allow for
customization of the requestor :class:`.Reddit` will use. This allows,
e.g., easily adding behavior to the requestor or wrapping its
|Session|_ in a caching layer. Example usage:
.. |Session| replace:: ``Session``
.. _Session: https://2.python-requests.org/en/master/api/\
#requests.Session
.. code-block:: python
import json, betamax, requests
class JSONDebugRequestor(Requestor):
def request(self, *args, **kwargs):
response = super().request(*args, **kwargs)
print(json.dumps(response.json(), indent=4))
return response
my_session = betamax.Betamax(requests.Session())
reddit = Reddit(..., requestor_class=JSONDebugRequestor,
requestor_kwargs={'session': my_session})
"""
self._core = self._authorized_core = self._read_only_core = None
self._objector = None
self._unique_counter = 0
try:
config_section = site_name or os.getenv("praw_site") or "DEFAULT"
self.config = Config(config_section, **config_settings)
except configparser.NoSectionError as exc:
help_message = (
"You provided the name of a praw.ini "
"configuration which does not exist.\n\nFor help "
"with creating a Reddit instance, visit\n"
"https://praw.readthedocs.io/en/latest/code_overvi"
"ew/reddit_instance.html\n\n"
"For help on configuring PRAW, visit\n"
"https://praw.readthedocs.io/en/latest/getting_sta"
"rted/configuration.html"
)
if site_name is not None:
exc.message += "\n" + help_message
raise
required_message = (
"Required configuration setting {!r} missing. \n"
"This setting can be provided in a praw.ini file, "
"as a keyword argument to the `Reddit` class "
"constructor, or as an environment variable."
)
for attribute in ("client_id", "user_agent"):
if getattr(self.config, attribute) in (
self.config.CONFIG_NOT_SET,
None,
):
raise ClientException(required_message.format(attribute))
if self.config.client_secret is self.config.CONFIG_NOT_SET:
raise ClientException(
required_message.format("client_secret")
+ "\nFor installed applications this value "
"must be set to None via a keyword argument "
"to the `Reddit` class constructor."
)
self._check_for_update()
self._prepare_objector()
self._prepare_prawcore(requestor_class, requestor_kwargs)
self.auth = models.Auth(self, None)
"""An instance of :class:`.Auth`.
Provides the interface for interacting with installed and web
applications. See :ref:`auth_url`
"""
self.front = models.Front(self)
"""An instance of :class:`.Front`.
Provides the interface for interacting with front page listings. For
example:
.. code-block:: python
for submission in reddit.front.hot():
print(submission)
"""
self.inbox = models.Inbox(self, None)
"""An instance of :class:`.Inbox`.
Provides the interface to a user's inbox which produces
:class:`.Message`, :class:`.Comment`, and :class:`.Submission`
instances. For example to iterate through comments which mention the
authorized user run:
.. code-block:: python
for comment in reddit.inbox.mentions():
print(comment)
"""
self.live = models.LiveHelper(self, None)
"""An instance of :class:`.LiveHelper`.
Provides the interface for working with :class:`.LiveThread`
instances. At present only new LiveThreads can be created.
.. code-block:: python
reddit.live.create('title', 'description')
"""
self.multireddit = models.MultiredditHelper(self, None)
"""An instance of :class:`.MultiredditHelper`.
Provides the interface to working with :class:`.Multireddit`
instances. For example you can obtain a :class:`.Multireddit` instance
via:
.. code-block:: python
reddit.multireddit('samuraisam', 'programming')
"""
self.redditors = models.Redditors(self, None)
"""An instance of :class:`.Redditors`.
Provides the interface for Redditor discovery. For example
to iterate over the newest Redditors, run:
.. code-block:: python
for redditor in reddit.redditors.new(limit=None):
print(redditor)
"""
self.subreddit = models.SubredditHelper(self, None)
"""An instance of :class:`.SubredditHelper`.
Provides the interface to working with :class:`.Subreddit`
instances. For example to create a Subreddit run:
.. code-block:: python
reddit.subreddit.create('coolnewsubname')
To obtain a lazy a :class:`.Subreddit` instance run:
.. code-block:: python
reddit.subreddit('redditdev')
Note that multiple subreddits can be combined and filtered views of
/r/all can also be used just like a subreddit:
.. code-block:: python
reddit.subreddit('redditdev+learnpython+botwatch')
reddit.subreddit('all-redditdev-learnpython')
"""
self.subreddits = models.Subreddits(self, None)
"""An instance of :class:`.Subreddits`.
Provides the interface for :class:`.Subreddit` discovery. For example
to iterate over the set of default subreddits run:
.. code-block:: python
for subreddit in reddit.subreddits.default(limit=None):
print(subreddit)
"""
self.user = models.User(self)
"""An instance of :class:`.User`.
Provides the interface to the currently authorized
:class:`.Redditor`. For example to get the name of the current user
run:
.. code-block:: python
print(reddit.user.me())
"""
def _check_for_update(self):
if UPDATE_CHECKER_MISSING:
return
if not Reddit.update_checked and self.config.check_for_updates:
update_check(__package__, __version__)
Reddit.update_checked = True
def _prepare_objector(self):
mappings = {
self.config.kinds["comment"]: models.Comment,
self.config.kinds["message"]: models.Message,
self.config.kinds["redditor"]: models.Redditor,
self.config.kinds["submission"]: models.Submission,
self.config.kinds["subreddit"]: models.Subreddit,
self.config.kinds["trophy"]: models.Trophy,
"Button": models.Button,
"Collection": models.Collection,
"Image": models.Image,
"LabeledMulti": models.Multireddit,
"Listing": models.Listing,
"LiveUpdate": models.LiveUpdate,
"LiveUpdateEvent": models.LiveThread,
"MenuLink": models.MenuLink,
"ModmailAction": models.ModmailAction,
"ModmailConversation": models.ModmailConversation,
"ModmailMessage": models.ModmailMessage,
"Submenu": models.Submenu,
"TrophyList": models.TrophyList,
"UserList": models.RedditorList,
"button": models.ButtonWidget,
"calendar": models.Calendar,
"community-list": models.CommunityList,
"custom": models.CustomWidget,
"id-card": models.IDCard,
"image": models.ImageWidget,
"menu": models.Menu,
"modaction": models.ModAction,
"moderators": models.ModeratorsWidget,
"more": models.MoreComments,
"post-flair": models.PostFlairWidget,
"stylesheet": models.Stylesheet,
"subreddit-rules": models.RulesWidget,
"textarea": models.TextArea,
"widget": models.Widget,
}
self._objector = Objector(self, mappings)
def _prepare_prawcore(self, requestor_class=None, requestor_kwargs=None):
requestor_class = requestor_class or Requestor
requestor_kwargs = requestor_kwargs or {}
requestor = requestor_class(
USER_AGENT_FORMAT.format(self.config.user_agent),
self.config.oauth_url,
self.config.reddit_url,
**requestor_kwargs
)
if self.config.client_secret:
self._prepare_trusted_prawcore(requestor)
else:
self._prepare_untrusted_prawcore(requestor)
def _prepare_trusted_prawcore(self, requestor):
authenticator = TrustedAuthenticator(
requestor,
self.config.client_id,
self.config.client_secret,
self.config.redirect_uri,
)
read_only_authorizer = ReadOnlyAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
if self.config.username and self.config.password:
script_authorizer = ScriptAuthorizer(
authenticator, self.config.username, self.config.password
)
self._core = self._authorized_core = session(script_authorizer)
elif self.config.refresh_token:
authorizer = Authorizer(authenticator, self.config.refresh_token)
self._core = self._authorized_core = session(authorizer)
else:
self._core = self._read_only_core
def _prepare_untrusted_prawcore(self, requestor):
authenticator = UntrustedAuthenticator(
requestor, self.config.client_id, self.config.redirect_uri
)
read_only_authorizer = DeviceIDAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
if self.config.refresh_token:
authorizer = Authorizer(authenticator, self.config.refresh_token)
self._core = self._authorized_core = session(authorizer)
else:
self._core = self._read_only_core
def comment(
self, # pylint: disable=invalid-name
id=None, # pylint: disable=redefined-builtin
url=None,
):
"""Return a lazy instance of :class:`~.Comment` for ``id``.
:param id: The ID of the comment.
:param url: A permalink pointing to the comment.
.. note:: If you want to obtain the comment's replies, you will need to
call :meth:`~.Comment.refresh` on the returned
:class:`.Comment`.
"""
return models.Comment(self, id=id, url=url)
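    # Illustrative sketch ('dkd4w3' is a placeholder comment ID, not from this module):
    #   comment = reddit.comment(id='dkd4w3')
    #   comment.refresh()   # required before iterating comment.replies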
def domain(self, domain):
"""Return an instance of :class:`.DomainListing`.
:param domain: The domain to obtain submission listings for.
"""
return models.DomainListing(self, domain)
def get(self, path, params=None):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default:
None).
"""
data = self.request("GET", path, params=params)
return self._objector.objectify(data)
def info(self, fullnames=None, url=None):
"""Fetch information about each item in ``fullnames`` or from ``url``.
:param fullnames: A list of fullnames for comments, submissions, and/or
subreddits.
:param url: A url (as a string) to retrieve lists of link submissions
from.
:returns: A generator that yields found items in their relative order.
Items that cannot be matched will not be generated. Requests will be
issued in batches for each 100 fullnames.
.. note:: For comments that are retrieved via this method, if you want
to obtain its replies, you will need to call
:meth:`~.Comment.refresh` on the yielded :class:`.Comment`.
.. note:: When using the URL option, it is important to be aware that
URLs are treated literally by Reddit's API. As such, the URLs
"youtube.com" and "https://www.youtube.com" will provide a
different set of submissions.
"""
none_count = [fullnames, url].count(None)
if none_count > 1:
raise TypeError("Either `fullnames` or `url` must be provided.")
if none_count < 1:
raise TypeError(
"Mutually exclusive parameters: `fullnames`, `url`"
)
if fullnames is not None:
if isinstance(fullnames, str):
raise TypeError("`fullnames` must be a non-str iterable.")
def generator(fullnames):
iterable = iter(fullnames)
while True:
chunk = list(islice(iterable, 100))
if not chunk:
break
params = {"id": ",".join(chunk)}
for result in self.get(API_PATH["info"], params=params):
yield result
return generator(fullnames)
def generator(url):
params = {"url": url}
for result in self.get(API_PATH["info"], params=params):
yield result
return generator(url)
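    # Illustrative sketch (the fullnames are placeholders, not real items):
    #   for item in reddit.info(fullnames=['t3_2gmzqe', 't1_d1xclfo']):
    #       print(item)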
def patch(self, path, data=None):
"""Return parsed objects returned from a PATCH request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
"""
data = self.request("PATCH", path, data=data)
return self._objector.objectify(data)
def post(self, path, data=None, files=None, params=None):
"""Return parsed objects returned from a POST request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
:param files: Dictionary, filename to file (like) object mapping
(default: None).
:param params: The query parameters to add to the request (default:
None).
"""
data = self.request(
"POST", path, data=data or {}, files=files, params=params
)
return self._objector.objectify(data)
def put(self, path, data=None):
"""Return parsed objects returned from a PUT request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
"""
data = self.request("PUT", path, data=data)
return self._objector.objectify(data)
def random_subreddit(self, nsfw=False):
"""Return a random lazy instance of :class:`~.Subreddit`.
:param nsfw: Return a random NSFW (not safe for work) subreddit
(default: False).
"""
url = API_PATH["subreddit"].format(
subreddit="randnsfw" if nsfw else "random"
)
path = None
try:
self.get(url, params={"unique": self._next_unique})
except Redirect as redirect:
path = redirect.path
return models.Subreddit(self, path.split("/")[2])
def redditor(self, name):
"""Return a lazy instance of :class:`~.Redditor` for ``name``.
:param name: The name of the redditor.
"""
return models.Redditor(self, name)
def request(self, method, path, params=None, data=None, files=None):
"""Return the parsed JSON data returned from a request to URL.
:param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
:param path: The path to fetch.
:param params: The query parameters to add to the request (default:
None).
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
:param files: Dictionary, filename to file (like) object mapping
(default: None).
"""
return self._core.request(
method, path, data=data, files=files, params=params
)
def submission( # pylint: disable=invalid-name,redefined-builtin
self, id=None, url=None
):
"""Return a lazy instance of :class:`~.Submission`.
:param id: A reddit base36 submission ID, e.g., ``2gmzqe``.
:param url: A URL supported by
            :meth:`~praw.models.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
return models.Submission(self, id=id, url=url)
| bsd-2-clause | 858,175,342,777,870,000 | 33.971477 | 79 | 0.593533 | false |
Dr-Drive/hycohanz | examples/create_sphere.py | 1 | 1265 | import hycohanz as hfss
raw_input('Press "Enter" to connect to HFSS.>')
[oAnsoftApp, oDesktop] = hfss.setup_interface()
raw_input('Press "Enter" to create a new project.>')
oProject = hfss.new_project(oDesktop)
raw_input('Press "Enter" to insert a new DrivenModal design named HFSSDesign1.>')
oDesign = hfss.insert_design(oProject, "HFSSDesign1", "DrivenModal")
raw_input('Press "Enter" to set the active editor to "3D Modeler" (The default and only known correct value).>')
oEditor = hfss.set_active_editor(oDesign)
raw_input('Press "Enter" to insert some circle properties into the design.>')
hfss.add_property(oDesign, "xcenter", hfss.Expression("1m"))
hfss.add_property(oDesign, "ycenter", hfss.Expression("2m"))
hfss.add_property(oDesign, "zcenter", hfss.Expression("3m"))
hfss.add_property(oDesign, "diam", hfss.Expression("1m"))
raw_input('Press "Enter" to draw a circle using the properties.>')
hfss.create_sphere(oEditor, hfss.Expression("xcenter"),
hfss.Expression("ycenter"),
hfss.Expression("zcenter"),
hfss.Expression("diameter")/2)
raw_input('Press "Enter" to quit HFSS.>')
hfss.quit_application(oDesktop)
del oEditor
del oDesign
del oProject
del oDesktop
del oAnsoftApp
| bsd-2-clause | 2,732,410,472,705,458,000 | 28.853659 | 112 | 0.705138 | false |
djb1815/Essex-MuSoc | musoc_web/schedule/views.py | 1 | 1465 | from django.shortcuts import render, redirect
from django.db import transaction
from django.contrib.auth.decorators import login_required
from .forms import ProfileNameForm, ProfileDetailForm
from django.contrib import messages
# Create your views here.
def index(request):
# Add variables in the custom_variables dict below to make them available within the rendered page
title = "Welcome"
custom_variables = {
'title': title
}
return render(request, "schedule/home.html", custom_variables)
@login_required
@transaction.atomic
def profile(request):
title = "Account Settings"
if request.method == 'POST':
name_form = ProfileNameForm(request.POST, instance=request.user)
detail_form = ProfileDetailForm(request.POST, instance=request.user.profile)
if name_form.is_valid() and detail_form.is_valid():
name_form.save()
detail_form.save()
messages.success(request, 'Your profile has been successfully updated!')
return redirect('profile')
else:
messages.error(request, 'Please correct the error below.')
else:
name_form = ProfileNameForm(instance=request.user)
detail_form = ProfileDetailForm(instance=request.user.profile)
custom_variables = {
'title': title,
'name_form': name_form,
'detail_form': detail_form
}
return render(request, "account/profile.html", custom_variables)
| mit | 6,622,172,651,528,371,000 | 34.731707 | 102 | 0.686689 | false |
cloughrm/Flask-Angular-Template | backend/pastry/resources/v1/users.py | 1 | 1720 | from pastry.db import mongo
from pastry.models import User
from pastry.resources.auth import login_required
from pastry.resources import validators, httpcodes
from bson.objectid import ObjectId
from flask import request
from flask.ext.restful import Resource, reqparse
class UsersResource(Resource):
@login_required
def get(self, id):
return mongo.db.users.find_one_or_404({'_id': ObjectId(id)})
@login_required
def delete(self, id):
return mongo.db.users.remove({'_id': ObjectId(id)})
class UsersListResource(Resource):
def __init__(self):
self.parser = reqparse.RequestParser()
if request.method == 'GET':
self.parser.add_argument('limit', type=int, default=20)
self.parser.add_argument('offset', type=int, default=0)
elif request.method == 'POST':
self.parser.add_argument('username', type=validators.email_address, required=True)
self.parser.add_argument('password', type=str, required=True)
super(UsersListResource, self).__init__()
@login_required
def get(self):
args = self.parser.parse_args()
users = mongo.db.users.find().skip(args.offset).limit(args.limit)
return {
'objects': users,
'offset': args.offset,
'limit': args.limit,
}
@login_required
def post(self):
args = self.parser.parse_args()
user = User(args.username, args.password)
if mongo.db.users.find_one({'username': user.username}):
return {'message': 'User {} already exists'.format(user.username)}, httpcodes.BAD_REQUEST
user_id = user.create()
return {'id': user_id}, httpcodes.CREATED
| mit | -4,205,179,780,906,292,700 | 33.4 | 101 | 0.640116 | false |
cmende/pytelefoob0t | plugins/8ball/__init__.py | 1 | 1199 | # Copyright 2017 Christoph Mende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
choices = [
'It is certain',
'It is decidedly so',
'Without a doubt',
'Yes definitely',
'You may rely on it',
'As I see it, yes',
'Most likely',
'Outlook good',
'Yes',
'Signs point to yes',
'Reply hazy try again',
'Ask again later',
'Better not tell you now',
'Cannot predict now',
'Concentrate and ask again',
'Don\'t count on it',
'My reply is no',
'My sources say no',
'Outlook not so good',
'Very doubtful'
]
def eightball(user, args):
return random.choice(choices)
commands = {
'8ball': eightball
}
| apache-2.0 | 3,168,563,416,438,959,600 | 25.644444 | 75 | 0.665555 | false |
shanksauce/mintr | mintr/__init__.py | 1 | 1862 | import requests
import time
import re
from pprint import pprint
auth_headers = {}
def _validate_credentials(fn):
def wrapper(*args):
def is_not_populated(d,r):
return reduce(
lambda x,y: x or y,
map(lambda k: k not in d or not d[k], r)
)
if is_not_populated(auth_headers, ('cookie', 'token')):
raise Exception('Login first')
return fn(*args)
return wrapper
def login(username, password):
if username is None or password is None:
raise Exception('Use valid credentials')
a = requests.get('https://wwws.mint.com/login.event')
session_id = a.cookies.get('MINTJSESSIONID')
route_id = a.cookies.get('ROUTEID')
b = requests.post(
'https://wwws.mint.com/loginUserSubmit.xevent',
cookies = a.cookies,
headers = {
'Accept': 'application/json',
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'MINTJSESSIONID={0}; ROUTEID={1}'.format(session_id,
route_id)
},
data = {
'username': username,
'password': password,
'task': 'L'
}
)
csrf_token = b.json()['CSRFToken']
match = re.search('MINTJSESSIONID=(.*?);', b.headers['set-cookie'])
if match is None:
raise Exception('No MINTJSESSIONID')
    b_session_id = match.groups(0)[0]
    # Assumed intent: store the session credentials that the
    # @_validate_credentials decorator checks for ('cookie' and 'token').
    auth_headers['cookie'] = 'MINTJSESSIONID={0}; ROUTEID={1}'.format(
        b_session_id, route_id)
    auth_headers['token'] = csrf_token
#@_validate_credentials
def get_account_summaries(jwt=None):
if jwt is None:
return {}
try:
d = requests.get(
'https://mint.finance.intuit.com/v1/accounts?limit=1000',
headers = {'Authorization': 'Bearer ' + jwt}
)
accounts = dict(map(
lambda x: (
x['fiName'] + ' ' + x['cpAccountName'],
x['currentBalance']
),
filter(
lambda x: x['accountStatus'] == 'ACTIVE' and x['currentBalance'] > 0,
d.json()['Account']
)
))
return accounts
except Exception as ex:
return {}
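# Illustrative usage sketch (hypothetical values; the JWT must be obtained separately):
#   login('user@example.com', 'secret')
#   balances = get_account_summaries(jwt='<bearer token>')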
| mit | 3,864,896,717,037,192,700 | 24.162162 | 77 | 0.605263 | false |
davidtrem/ThunderStorm | thunderstorm/lightning/utils.py | 1 | 5027 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2013 Trémouilles David
#This file is part of Thunderstorm.
#
#ThunderStrom is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#ThunderStorm is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public License
#along with ThunderStorm. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions
"""
import matplotlib
from weakref import WeakValueDictionary
from weakref import WeakKeyDictionary
import warnings
class UniversalCursors(object):
def __init__(self):
self.all_cursor_orient = WeakKeyDictionary()
self.all_canvas = WeakValueDictionary()
self.all_axes = WeakValueDictionary()
self.backgrounds = {}
self.visible = True
self.needclear = False
def _onmove(self, event):
for canvas in self.all_canvas.values():
if not canvas.widgetlock.available(self):
return
if event.inaxes is None or not self.visible:
if self.needclear:
self._update(event)
for canvas in self.all_canvas.values():
canvas.draw()
self.needclear = False
return
self._update(event)
def _update(self, event):
# 1/ Reset background
for canvas in self.all_canvas.values():
canvas.restore_region(self.backgrounds[id(canvas)])
# 2/ update cursors
for cursors in self.all_cursor_orient.keys():
orient = self.all_cursor_orient[cursors]
if (event.inaxes in [line.get_axes() for line in cursors]
and self.visible):
visible = True
self.needclear = True
else:
visible = False
for line in cursors:
if orient == 'vertical':
line.set_xdata((event.xdata, event.xdata))
if orient == 'horizontal':
line.set_ydata((event.ydata, event.ydata))
line.set_visible(visible)
ax = line.get_axes()
ax.draw_artist(line)
# 3/ update canvas
for canvas in self.all_canvas.values():
canvas.blit(canvas.figure.bbox)
def _clear(self, event):
"""clear the cursor"""
self.backgrounds = {}
for canvas in self.all_canvas.values():
self.backgrounds[id(canvas)] = (
canvas.copy_from_bbox(canvas.figure.bbox))
for cursor in self.all_cursor_orient.keys():
for line in cursor:
line.set_visible(False)
def add_cursor(self, axes=(), orient='vertical', **lineprops):
class CursorList(list):
def __hash__(self):
return hash(tuple(self))
cursors = CursorList() # Required to keep weakref
for ax in axes:
self.all_axes[id(ax)] = ax
ax_canvas = ax.get_figure().canvas
if ax_canvas not in self.all_canvas.values():
#if not ax_canvas.supports_blit:
# warnings.warn("Must use canvas that support blit")
# return
self.all_canvas[id(ax_canvas)] = ax_canvas
ax_canvas.mpl_connect('motion_notify_event', self._onmove)
ax_canvas.mpl_connect('draw_event', self._clear)
if orient == 'vertical':
line = ax.axvline(ax.get_xbound()[0], visible=False,
animated=True, **lineprops)
if orient == 'horizontal':
line = ax.axhline(ax.get_ybound()[0], visible=False,
animated=True, **lineprops)
cursors.append(line)
self.all_cursor_orient[cursors] = orient
return cursors
def autoscale_visible_lines(axs):
"""
Function to autoscale only on visible lines.
"""
mplt_ver = [int(elem) for elem in matplotlib.__version__.split('.')[0:2]]
ignore = True
for line in (axs.lines):
if not line.get_visible():
continue # jump to next line if this one is not visible
if mplt_ver[0] == 0 and mplt_ver[1] < 98:
axs.dataLim.update_numerix(line.get_xdata(),
line.get_ydata(),
ignore)
else:
axs.dataLim.update_from_data_xy(line.get_xydata(),
ignore)
ignore = False
axs.autoscale_view()
return None
def neg_bool_list(a_list):
return [not elem for elem in a_list]
| gpl-3.0 | 895,360,067,562,563,700 | 35.686131 | 77 | 0.569041 | false |
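A minimal wiring sketch for the cursor helper above, assuming an interactive matplotlib backend that supports blitting and that the module is importable from the package path shown in the record header.

import matplotlib.pyplot as plt
from thunderstorm.lightning.utils import UniversalCursors, autoscale_visible_lines

fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot([0, 1, 2], [0, 1, 4])
ax2.plot([0, 1, 2], [3, 2, 1])

cursor_mgr = UniversalCursors()
# Keep the returned list alive: the manager only holds weak references to it.
vlines = cursor_mgr.add_cursor(axes=(ax1, ax2), orient='vertical', color='red')
autoscale_visible_lines(ax1)
plt.show()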
MicroPyramid/forex-python | setup.py | 1 | 1589 | import io
import os
from setuptools import setup, find_packages
VERSION = '1.6'
long_description_text = """Forex Python is a free library for foreign exchange rates and currency conversion.
Features:
List all currency rates.
Bitcoin prices for all currencies.
Convert amounts to Bitcoin.
Get historical rates for any day since 1999.
Conversion rate for one currency (e.g. USD to INR).
Convert an amount from one currency to another ('USD 10$' to INR).
Currency symbols.
Currency names.
Documentation: http://forex-python.readthedocs.io/en/latest/usage.html
GitHub: https://github.com/MicroPyramid/forex-python
"""
setup(
name='forex-python',
version=VERSION,
author='Micro Pyramid Informatic Pvt. Ltd.',
author_email='[email protected]',
url='https://github.com/MicroPyramid/forex-python',
description='Foreign exchange rates and currency conversion.',
long_description=long_description_text,
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
install_requires=[
'requests',
'simplejson',
],
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Internationalization',
],
)
| mit | 7,102,093,925,092,458,000 | 32.104167 | 97 | 0.680931 | false |
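The features listed in the long description map onto forex-python's converter and bitcoin modules; a short, hedged illustration follows (verify the calls against the installed version).

from forex_python.converter import CurrencyRates, CurrencyCodes
from forex_python.bitcoin import BtcConverter

rates = CurrencyRates()
print(rates.get_rate('USD', 'INR'))            # conversion rate for one currency pair
print(rates.convert('USD', 'INR', 10))         # convert an amount
print(CurrencyCodes().get_symbol('GBP'))       # currency symbol
print(BtcConverter().get_latest_price('USD'))  # Bitcoin price in a given currency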
answer-huang/StatisticsCodeLines | statistics.py | 1 | 3024 | #coding=utf-8
__author__ = 'answer-huang'
import sys
reload(sys)
sys.setdefaultencoding('utf8')
"""
Code line counting tool
"""
import wx
from MyInfo import AboutMe
from AHDropTarget import AHDropTarget
import os
class AHFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title, wx.DefaultPosition, wx.Size(500, 380),
style=wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))
#style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX)
self.SetTransparent(250)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetForegroundColour('red')
self.statusbar.SetFieldsCount(2)
        self.statusbar.SetStatusWidths([-2, -1]) # width ratio 2:1
toolbar = self.CreateToolBar()
toolbar.AddSeparator()
toolbar.AddSimpleTool(1, wx.Image('about.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap(), "关于我", "")
toolbar.AddSeparator()
toolbar.AddSimpleTool(2, wx.Image('donate.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap(), "捐助我", "")
        toolbar.Realize() # prepare the toolbar for display
wx.EVT_TOOL(self, 1, self.OnAboutMe)
wx.EVT_TOOL(self, 2, self.OnDonate)
self.panel = wx.Panel(self)
self.panel.SetDropTarget(AHDropTarget(self))
self.font = wx.Font(18, wx.SCRIPT, wx.BOLD, wx.LIGHT)
self.selectedPath = wx.StaticText(self.panel, -1, u'将项目拖拽到这里', pos=(178, 280))
self.selectedPath.SetFont(self.font)
self.panel.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterWindow)
self.panel.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.panel.Bind(wx.EVT_MOTION, self.OnMotion)
def OnEnterWindow(self, event):
#print event.LeftIsDown()
event.Skip()
def OnLeaveWindow(self, event):
#print "leave"
event.Skip()
def OnMotion(self, event):
if event.Dragging() and event.LeftIsDown():
            print 'dragging with the left mouse button held down'
event.Skip()
def UpdateStatus(self, path, codes_num):
self.statusbar.SetStatusText(path, 0)
self.statusbar.SetStatusText(codes_num, 1)
def ShowImage(self, img):
self.image = wx.Image(img, wx.BITMAP_TYPE_JPEG).Rescale(500, 350, quality=wx.IMAGE_QUALITY_HIGH)
bitmap = self.image.ConvertToBitmap()
self.logo = wx.StaticBitmap(self.panel, bitmap=bitmap, pos=(0, 0), size=(500, 350))
def ShowPathDir(self, dirList):
wx.CheckListBox(self.panel, -1, choices=dirList)
def OnAboutMe(self, event):
aboutMe = AboutMe(self)
aboutMe.ShowModal()
aboutMe.Destroy()
def OnDonate(self, event):
#wx.BeginBusyCursor()
import webbrowser
webbrowser.open('https://me.alipay.com/huangaiwu')
#wx.EndBusyCursor()
if __name__ == '__main__':
app = wx.App(redirect=False)
frame = AHFrame(None, '代码统计工具')
frame.Show(True)
app.MainLoop() | mit | 6,652,973,643,853,062,000 | 30.902174 | 105 | 0.630198 | false |
klahnakoski/ActiveData | vendor/mo_testing/fuzzytestcase.py | 1 | 9712 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
import datetime
import types
import unittest
from mo_collections.unique_index import UniqueIndex
import mo_dots
from mo_dots import coalesce, is_container, is_list, literal_field, unwrap, to_data, is_data, is_many
from mo_future import is_text, zip_longest, first
from mo_logs import Except, Log, suppress_exception
from mo_logs.strings import expand_template, quote
import mo_math
from mo_math import is_number, log10
from mo_times import dates
class FuzzyTestCase(unittest.TestCase):
"""
COMPARE STRUCTURE AND NUMBERS!
ONLY THE ATTRIBUTES IN THE expected STRUCTURE ARE TESTED TO EXIST
EXTRA ATTRIBUTES ARE IGNORED.
NUMBERS ARE MATCHED BY ...
* places (UP TO GIVEN SIGNIFICANT DIGITS)
* digits (UP TO GIVEN DECIMAL PLACES, WITH NEGATIVE MEANING LEFT-OF-UNITS)
* delta (MAXIMUM ABSOLUTE DIFFERENCE FROM expected)
"""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.default_places=15
def set_default_places(self, places):
"""
WHEN COMPARING float, HOW MANY DIGITS ARE SIGNIFICANT BY DEFAULT
"""
self.default_places=places
def assertAlmostEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None):
if delta or digits:
assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta)
else:
assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=coalesce(places, self.default_places), delta=delta)
def assertEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None):
self.assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta)
def assertRaises(self, problem=None, function=None, *args, **kwargs):
if function is None:
return RaiseContext(self, problem=problem or Exception)
with RaiseContext(self, problem=problem):
function(*args, **kwargs)
class RaiseContext(object):
def __init__(self, this, problem=Exception):
self.this = this
self.problem = problem
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_val:
Log.error("Expecting an error")
f = Except.wrap(exc_val)
if isinstance(self.problem, (list, tuple)):
problems = self.problem
else:
problems = [self.problem]
causes = []
for problem in problems:
if isinstance(problem, object.__class__) and issubclass(problem, BaseException) and isinstance(exc_val, problem):
return True
try:
self.this.assertIn(problem, f)
return True
except Exception as cause:
causes.append(cause)
Log.error("problem is not raised", cause=first(causes))
def assertAlmostEqual(test, expected, digits=None, places=None, msg=None, delta=None):
show_detail = True
test = unwrap(test)
expected = unwrap(expected)
try:
if test is None and (is_null_op(expected) or expected is None):
return
elif test is expected:
return
elif is_text(expected):
assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta)
elif isinstance(test, UniqueIndex):
if test ^ expected:
Log.error("Sets do not match")
elif is_data(expected) and is_data(test):
for k, e in unwrap(expected).items():
t = test.get(k)
assertAlmostEqual(t, e, msg=coalesce(msg, "")+"key "+quote(k)+": ", digits=digits, places=places, delta=delta)
elif is_data(expected):
if is_many(test):
test = list(test)
if len(test) != 1:
Log.error("Expecting data, not a list")
test = test[0]
for k, e in expected.items():
try:
t = test[k]
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
continue
except:
pass
t = mo_dots.get_attr(test, literal_field(k))
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
elif is_container(test) and isinstance(expected, set):
test = set(to_data(t) for t in test)
            if len(test) != len(expected):
                Log.error(
                    "Sets do not match, element count different:\n{{test|json|indent}}\nexpecting {{expected|json|indent}}",
test=test,
expected=expected
)
try:
return len(set(test)|expected) == len(expected)
except:
for e in expected:
for t in test:
try:
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
break
except Exception as _:
pass
else:
Log.error("Sets do not match. {{value|json}} not found in {{test|json}}", value=e, test=test)
elif isinstance(expected, types.FunctionType):
return expected(test)
elif hasattr(test, "__iter__") and hasattr(expected, "__iter__"):
if test.__class__.__name__ == "ndarray": # numpy
test = test.tolist()
elif test.__class__.__name__ == "DataFrame": # pandas
test = test[test.columns[0]].values.tolist()
elif test.__class__.__name__ == "Series": # pandas
test = test.values.tolist()
if not expected and test == None:
return
if expected == None:
expected = [] # REPRESENT NOTHING
for t, e in zip_longest(test, expected):
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
else:
assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta)
except Exception as cause:
Log.error(
"{{test|json|limit(10000)}} does not match expected {{expected|json|limit(10000)}}",
test=test if show_detail else "[can not show]",
expected=expected if show_detail else "[can not show]",
cause=cause
)
def assertAlmostEqualValue(test, expected, digits=None, places=None, msg=None, delta=None):
"""
Snagged from unittest/case.py, then modified (Aug2014)
"""
if is_null_op(expected):
        if test == None: # pandas dataframes reject any comparison with an exception!
return
else:
raise AssertionError(expand_template("{{test|json}} != NULL", locals()))
if expected == None: # None has no expectations
return
if test == expected:
# shortcut
return
if isinstance(expected, (dates.Date, datetime.datetime, datetime.date)):
return assertAlmostEqualValue(
dates.Date(test).unix,
dates.Date(expected).unix,
msg=msg,
digits=digits,
places=places,
delta=delta
)
if not is_number(expected):
# SOME SPECIAL CASES, EXPECTING EMPTY CONTAINERS IS THE SAME AS EXPECTING NULL
if is_list(expected) and len(expected) == 0 and test == None:
return
if is_data(expected) and not expected.keys() and test == None:
return
if test != expected:
raise AssertionError(expand_template("{{test|json}} != {{expected|json}}", locals()))
return
elif not is_number(test):
try:
# ASSUME IT IS A UTC DATE
test = dates.parse(test).unix
except Exception as e:
raise AssertionError(expand_template("{{test|json}} != {{expected}}", locals()))
num_param = 0
if digits != None:
num_param += 1
if places != None:
num_param += 1
if delta != None:
num_param += 1
if num_param > 1:
raise TypeError("specify only one of digits, places or delta")
if digits is not None:
with suppress_exception:
diff = log10(abs(test-expected))
if diff < digits:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{digits}} decimal places", locals())
elif delta is not None:
if abs(test - expected) <= delta:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{delta}} delta", locals())
else:
if places is None:
places = 15
with suppress_exception:
diff = mo_math.log10(abs(test-expected))
if diff == None:
return # Exactly the same
if diff < mo_math.ceiling(mo_math.log10(abs(test)))-places:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{places}} places", locals())
raise AssertionError(coalesce(msg, "") + ": (" + standardMsg + ")")
def is_null_op(v):
return v.__class__.__name__ == "NullOp"
| mpl-2.0 | -932,436,513,672,928,600 | 36.210728 | 134 | 0.576091 | false |
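A usage sketch for the tolerance options described in the FuzzyTestCase docstring above; note the argument order is (test value, expected value), and only keys present in the expected structure are checked.

from mo_testing.fuzzytestcase import FuzzyTestCase

class ExampleTest(FuzzyTestCase):
    def test_tolerances(self):
        self.assertAlmostEqual(3.14159, 3.1416, places=4)   # significant digits
        self.assertAlmostEqual(1001.0, 1000.0, delta=2)     # absolute difference
        self.assertAlmostEqual({"a": 1, "extra": "ignored"}, {"a": 1})  # extra attributes ignored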
PopCap/GameIdea | Engine/Extras/Maya_AnimationRiggingTools/MayaTools/General/Scripts/perforceUtils.py | 1 | 32585 | import maya.cmds as cmds
from P4 import P4,P4Exception
import os, cPickle
from functools import partial
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_getLatestRevision(fileName, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#try to get the current file revision on local, and compare to depot
try:
#Find out the revision of the local version of the file
myFile = p4.run_have(fileName)[0]
#This will find the revision number of your local file.
localRevision = int(myFile['haveRev'])
#find out the revision number of the depot version of the file
depotVersion = p4.run_files(myFile['depotFile'])[0]
#find the depot file path
depotFile = depotVersion['depotFile']
#find the depot revision number of the file
depotRevision = int(depotVersion['rev'])
#check for latest
if localRevision != depotRevision:
syncFiles.append(depotFile)
#Check for scene references in the file
allRefs = []
references = cmds.file(q = True, reference = True)
for reference in references:
nestedRef = cmds.file(reference, q = True, reference = True)
allRefs.append(reference)
allRefs.append(nestedRef)
#loop through all found references and check for latest
for ref in allRefs:
#get revision of local file
myFile = p4.run_have(ref)[0]
#get revision number
localRefRevision = int(myFile['haveRev'])
#grab depot file info
depotRefVersion = p4.run_files(myFile['depotFile'])[0]
#depot file path
depotFile = depotRefVersion['depotFile']
#get depot's revision #
depotRefRevision = int(depotRefVersion['rev'])
#compare
if localRefRevision != depotRefRevision:
syncFiles.append(depotFile)
#if there are files to sync, do it now
if len(syncFiles) > 0:
message = "The following files are not at latest revision:\n\n"
for file in syncFiles:
message += file + "\n"
result = cmds.confirmDialog(title = "Perforce", icon = "warning", message = message, button = ["Sync", "Cancel"])
if result == "Sync":
#sync files
for f in syncFiles:
p4.run_sync(f)
#ask if user would like to reopen
if fileArg == None:
result = cmds.confirmDialog(title = "Perforce", icon = "question", message = "Sync Complete. Reopen file to get changes?", button = ["Yes", "Cancel"])
if result == "Yes":
cmds.file(fileName, open = True, force = True)
else:
cmds.confirmDialog(title = "Perforce", icon = "information", message = "This file is already at head revision.", button = "Close")
#disconnect from server
p4.disconnect()
#Handle any p4 errors that come back from trying to run the above code
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_checkOutCurrentFile(fileName, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return False
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
try:
#check to see if file is at head revision
myFile = p4.run_have(fileName)[0]
#This will find the revision number of your local file.
localRevision = int(myFile['haveRev'])
#find out the revision number of the depot version of the file
depotVersion = p4.run_files(myFile['depotFile'])[0]
#find the depot file path
depotFile = depotVersion['depotFile']
#find the depot revision number of the file
depotRevision = int(depotVersion['rev'])
#check for latest
if localRevision != depotRevision:
result = cmds.confirmDialog(title = "Perforce", icon = "warning", message = "This file is not at head revision. Please get latest and try again.", button = ["Get Latest", "Cancel"])
if result == "Get Latest":
p4_getLatestRevision(fileArg)
p4.disconnect()
else:
return False
else:
try:
#check to see if file is checked out
opened = p4.run_opened(depotFile)
if len(opened) > 0:
user = opened[0]['user']
cmds.confirmDialog(title = "Perforce", icon = "warning", message = "This file is already checked out by: " + user, button = "Close")
p4.disconnect()
else:
#check out the file
                p4.run_edit(depotFile)
                cmds.confirmDialog(title = "Perforce", icon = "information", message = "This file is now checked out.", button = "Close")
p4.disconnect()
#tools path
toolsPath = cmds.internalVar(usd = True) + "mayaTools.txt"
if os.path.exists(toolsPath):
f = open(toolsPath, 'r')
mayaToolsDir = f.readline()
f.close()
return True
#Handle any p4 errors that come back from trying to run the above code
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
return False
#Handle any p4 errors that come back from trying to run the above code
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_getRevisionHistory(*args):
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
clientFile = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#get revision history of current file
try:
#check to see if file is at head revision
myFile = p4.run_have(clientFile)[0]
depotVersion = p4.run_files(myFile['depotFile'])[0]
depotFile = depotVersion['depotFile']
history = p4.run_changes(depotFile)
info = ""
for h in history:
user = h.get("user")
change = h.get("change")
desc = h.get("desc")
if desc.find("\n") == -1:
desc = desc + "...\n"
else:
desc = desc.partition("\n")[0] + "...\n"
info += change + " by " + user + ": " + desc
#print report into a confirm dialog
cmds.confirmDialog(title = "History", icon = "information", ma = "left", message = info, button = "Close")
p4.disconnect()
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_submitCurrentFile(fileName, desc, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#SUBMIT
try:
if desc == None:
result = cmds.promptDialog(title = "Perforce", message = "Please Enter a Description..", button = ["Accept", "Cancel"], defaultButton = "Accept", dismissString = "Cancel", cancelButton = "Cancel")
else:
result = "Accept"
#process
if result == "Accept":
#description = "test"
myFile = p4.run_have(fileName)[0]
depotVersion = p4.run_files(myFile['depotFile'])[0]
depotFile = depotVersion['depotFile']
#check to see if file is checked out
opened = p4.run_opened(depotFile)
if len(opened) > 0:
opendBy = opened[0]['user']
if opendBy.lower() != owner.lower():
cmds.confirmDialog(title = "Perforce", icon = "warning", message = "This file is already checked out by: " + opendBy, button = "Close")
p4.disconnect()
return
else:
#fetch the description
if desc == None:
desc = cmds.promptDialog(q = True, text = True)
#save the file locally (so depot and HD are in sync)
openedFile = cmds.file(q = True, sceneName = True)
saveFileName = openedFile.rpartition("/")[2]
if fileArg == None:
cmds.file(f = True, save = True, options = "v = 0", type = "mayaBinary")
#grab the name of the file
fileNameWithExt = openedFile.rpartition("/")[2]
fileName = fileNameWithExt.rpartition(".")[0]
description = (desc + "\n Affected Files: " + openedFile)
#create new changelist
newChange = p4.fetch_change()
newChange._description = description
#make sure we don't add existing default changelist files.
newChange._files = []
#determine the new number so we can refetch it.
newChangeNum = int(p4.save_change(newChange)[0].split()[1])
#change changelist number
p4.run_reopen('-c', newChangeNum, depotFile)
#submit the changelist
p4.run_submit('-c', newChangeNum)
#tell the user submit was successful
if fileArg == None:
cmds.confirmDialog(title = "Perforce", icon = "information", message = "Submit Operation was successful!", button = "Close")
else:
return True
else:
#if the file is not checked out by the user, let them know
result = cmds.confirmDialog(title = "Perforce", icon = "warning", message = "File is not checked out. Unable to continue submit operation on this file:\n\n" + fileName)
except P4Exception:
if fileArg == None:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_addAndSubmitCurrentFile(fileName, description, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#find currently opened file name
proceed = False
if fileArg == None:
fileName = cmds.file(q = True, sceneName = True)
if fileName == "":
cmds.confirmDialog(title = "Perforce", icon = "warning", message = "Cannot Add file to perforce as file has no name.", button = "Close")
p4.disconnect()
return
else:
proceed = True
else:
proceed = True
#if the file has a filename,
if proceed:
try:
clientRoot = p4.fetch_client(p4.client)._Root
#check to make sure client root is in the client file path
if os.path.normpath(fileName).find(os.path.normpath(clientRoot)) == 0:
#if it was, then get a description for the changelist
if description == None:
result = cmds.promptDialog(title = "Perforce", message = "Please Enter a Description..", button = ["Accept", "Cancel"], defaultButton = "Accept", dismissString = "Cancel", cancelButton = "Cancel")
else:
result = "Accept"
if result == "Accept":
#get changelist description
if description == None:
description = cmds.promptDialog(q = True, text = True)
#create changelist
newChange = p4.fetch_change()
newChange._description = description
#make sure we don't add existing default changelist files.
newChange._files = []
#determine the new number so we can refetch it.
newChangeNum = int(p4.save_change(newChange)[0].split()[1])
#description = "test"
p4.run_add('-c', newChangeNum, fileName)
#submit the changelist
p4.run_submit('-c', newChangeNum)
#tell user operation was successful
if fileArg == None:
result = cmds.confirmDialog(title = "Perforce", icon = "information", message = "File has been successfully added to perforce and submitted!", button = ["Close", "Check Out File"])
if result == "Close":
p4.disconnect()
return
if result == "Check Out File":
p4_checkOutCurrentFile(fileName)
#return operation succuessful
return True
else:
p4.disconnect()
return
else:
cmds.confirmDialog(title = "Perforce", icon = "warning", message = "Cannot proceed. File is not under client's root, " + clientRoot, button = "Close")
p4.disconnect()
return False
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_checkForUpdates(*args):
#try to connect
p4 = P4()
#get maya tools path
toolsPath = cmds.internalVar(usd = True) + "mayaTools.txt"
if os.path.exists(toolsPath):
f = open(toolsPath, 'r')
mayaToolsDir = f.readline()
f.close()
#connect to p4
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
clientFile = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#this will check the maya tools directory in p4 for any updates
try:
syncFiles = []
clientRoot = p4.fetch_client(p4.client)._Root
depotDirectories = []
#get current project
if os.path.exists(mayaToolsDir + "/General/Scripts/projectSettings.txt"):
#read to find current project
f = open(mayaToolsDir + "/General/Scripts/projectSettings.txt", 'r')
settings = cPickle.load(f)
f.close()
#write out new settings
project = settings.get("CurrentProject")
if os.path.exists(mayaToolsDir + "/General/Scripts/" + project + "_Project_Settings.txt"):
#read the depot paths to sync from the project settings
f = open(mayaToolsDir + "/General/Scripts/" + project + "_Project_Settings.txt", 'r')
settings = cPickle.load(f)
f.close()
depotDirectories = settings.get("depotPaths")
print depotDirectories
#look at each directory inside MayaTools
for dir in depotDirectories:
depotFiles = p4.run_files(dir + "...")
for each in depotFiles:
#try to compare depot to local. It is possible that there are local files not in depot, and vise versa
try:
fileInfo = p4.run_files(each['depotFile'])[0]
depotFilePath = fileInfo['depotFile']
fileName = depotFilePath.rpartition("/")[2]
#compare local files
localFile = p4.run_have(depotFilePath)[0]
localRevision = int(localFile['haveRev'])
depotRevision = int(fileInfo['rev'])
if localRevision < depotRevision:
syncFiles.append(depotFilePath)
except:
try:
#check to see if it errors out because we don't have a local version of the file
fileInfo = p4.run_files(each['depotFile'])[0]
depotFilePath = fileInfo['depotFile']
fileName = depotFilePath.rpartition("/")[2]
localFile = p4.run_have(depotFilePath)[0]
except:
action = each.get("action")
if action != "delete":
syncFiles.append(depotFilePath)
pass
#check size of syncFiles and ask user if they want to sync
if len(syncFiles) > 0:
result = cmds.confirmDialog(title = "MayaTools", icon = "warning", message = "There are new updates available to the depot directories specified by your project settings.", button = ["Update", "Not Now"])
if result == "Update":
for file in syncFiles:
p4.run_sync(file)
cmds.confirmDialog(title = "MayaTools", icon = "information", message = "Tools are now up to date!", button = "Close")
p4.disconnect()
else:
p4.disconnect()
return
#handle any errors
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def createNewProject(*args):
if cmds.window("createNewARTProject_Window", exists = True):
cmds.deleteUI("createNewARTProject_Window")
#create window
window = cmds.window("createNewARTProject_Window", w = 400, h = 600, mnb = False, mxb = False, title = "Create New Project")
#frameLayouts for settings: Perforce/Auto-Sync
mainLayout = cmds.columnLayout(w = 400, h = 600)
#project name field
projectName = cmds.textFieldGrp("newARTProjectNameField", label = "Project Name: ", w = 400, h = 30, parent = mainLayout, cal = [1, "left"])
scrollLayout = cmds.scrollLayout(w = 400, h = 520, parent = mainLayout)
columnLayout = cmds.columnLayout(w = 380, parent = scrollLayout)
#perforce/auto-sync layout
p4Frame = cmds.frameLayout(parent = columnLayout, w = 370, cll = True, label='Perforce/Auto-Sync', borderStyle='in')
p4Layout = cmds.columnLayout(w = 360, parent = p4Frame, co = ["both", 5], rs = 5)
#create the scrollField with the information
cmds.scrollField(parent = p4Layout, w = 350, h = 100, editable = False, wordWrap = True, text = "Add depot paths you would like the tools to check for updates on. If updates are found, you will be notified, and asked if you would like to sync. Valid depot paths look like:\n\n//depot/usr/jeremy_ernst/MayaTools")
#crete the add button
cmds.button(w = 350, label = "Add Perforce Depot Path", parent = p4Layout, c = partial(addPerforceDepotPath, p4Layout))
#save settings button
cmds.button(parent = mainLayout, w = 400, h = 50, label = "Save Settings and Close", c = partial(saveProjectSettings, p4Layout, False))
#show window
cmds.showWindow(window)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def editProject(project, *args):
if cmds.window("createNewARTProject_Window", exists = True):
cmds.deleteUI("createNewARTProject_Window")
#create window
window = cmds.window("createNewARTProject_Window", w = 400, h = 600, mnb = False, mxb = False, title = "Edit Project")
#frameLayouts for settings: Perforce/Auto-Sync
mainLayout = cmds.columnLayout(w = 400, h = 600)
#project name field
projectName = cmds.textFieldGrp("newARTProjectNameField", label = "Project Name: ", text = project, w = 400, h = 30, parent = mainLayout, cal = [1, "left"])
scrollLayout = cmds.scrollLayout(w = 400, h = 520, parent = mainLayout)
columnLayout = cmds.columnLayout(w = 380, parent = scrollLayout)
#perforce/auto-sync layout
p4Frame = cmds.frameLayout(parent = columnLayout, w = 370, cll = True, label='Perforce/Auto-Sync', borderStyle='in')
p4Layout = cmds.columnLayout(w = 360, parent = p4Frame, co = ["both", 5], rs = 5)
#create the scrollField with the information
cmds.scrollField(parent = p4Layout, w = 350, h = 100, editable = False, wordWrap = True, text = "Add depot paths you would like the tools to check for updates on. If updates are found, you will be notified, and asked if you would like to sync. Valid depot paths look like:\n\n//depot/usr/jeremy_ernst/MayaTools")
#crete the add button
cmds.button(w = 350, label = "Add Perforce Depot Path", parent = p4Layout, c = partial(addPerforceDepotPath, p4Layout))
#get maya tools path
toolsPath = cmds.internalVar(usd = True) + "mayaTools.txt"
if os.path.exists(toolsPath):
f = open(toolsPath, 'r')
mayaToolsDir = f.readline()
f.close()
#open the project settings and auto-fill in the info
if os.path.exists(mayaToolsDir + "/General/Scripts/" + project + "_Project_Settings.txt"):
f = open(mayaToolsDir + "/General/Scripts/" + project + "_Project_Settings.txt", 'r')
settings = cPickle.load(f)
f.close()
paths = settings.get("depotPaths")
if len(paths) > 0:
for path in paths:
#add the path
field = addPerforceDepotPath(p4Layout)
cmds.textField(field, edit = True, text = path)
#save settings button
cmds.button(parent = mainLayout, w = 400, h = 50, label = "Save Settings and Close", c = partial(saveProjectSettings, p4Layout, True))
#show window
cmds.showWindow(window)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def addPerforceDepotPath(layout, *args):
field = cmds.textField(docTag = "P4DepotPath", w = 350, parent = layout)
#add a RMB menu to remove the field
menu = cmds.popupMenu(parent = field, b = 3)
cmds.menuItem(parent = menu, label = "Remove Path", c = partial(removePerforceDepotPath, field))
return field
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def removePerforceDepotPath(field, *args):
cmds.textField(field, edit = True, visible = False, h = 1)
#cmds.deleteUI(field) This crashes maya instantly. Come ON AUTODESK
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def saveProjectSettings(perforceLayout, edit, *args):
#find p4 depot path textfields
children = cmds.columnLayout(perforceLayout, q = True, childArray = True)
textFields = []
for child in children:
if child.find("textField") == 0:
data = cmds.textField(child, q = True, docTag = True)
if data == "P4DepotPath":
textFields.append(child)
#make sure paths are valid
savePaths = []
for field in textFields:
path = cmds.textField(field, q = True, text = True)
if path != "":
try:
p4 = P4()
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#now check
try:
depotFiles = p4.run_files(path + "...")
if len(depotFiles) > 0:
savePaths.append(path)
#handle any errors
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return
else:
#see if the text field is just hidden or if it is actually blank
vis = cmds.textField(field, q = True, visible = True)
if vis == True:
cmds.confirmDialog(title = "Error", icon = "critical", message = "Empty string not allowed as a path name. Either remove that field, or enter a correct depot path.")
return
#write out to disk
projectName = cmds.textFieldGrp("newARTProjectNameField", q = True, text = True)
if projectName == "":
cmds.confirmDialog(title = "Error", icon = "critical", message = "Empty string not allowed as a project name.")
return
#save the new project file under MayaTools/General/Scripts as projName + "_Project_Settings.txt"
toolsPath = cmds.internalVar(usd = True) + "mayaTools.txt"
if os.path.exists(toolsPath):
f = open(toolsPath, 'r')
mayaToolsDir = f.readline()
f.close()
if edit == False:
if os.path.exists(mayaToolsDir + "/General/Scripts/" + projectName + "_Project_Settings.txt"):
cmds.confirmDialog(title = "Error", icon = "critical", message = "Project already exists with that name")
return
#save out
f = open(mayaToolsDir + "/General/Scripts/" + projectName + "_Project_Settings.txt", 'w')
#create a dictionary with values
settings = {}
settings["depotPaths"] = savePaths
#write our dictionary to file
cPickle.dump(settings, f)
f.close()
#delete the UI
cmds.deleteUI("createNewARTProject_Window")
#add the project to the menu
create = True
items = cmds.lsUI(menuItems = True)
for i in items:
data = cmds.menuItem(i, q = True, docTag = True)
if data == "P4Proj":
label = cmds.menuItem(i, q = True, label = True)
print label
if label == projectName:
create = False
if create:
menuItem = cmds.menuItem(label = projectName, parent = "perforceProjectList", cl = "perforceProjectRadioMenuCollection", rb = True, docTag = "P4Proj", c = partial(setCurrentProject, projectName))
cmds.menuItem(parent = "perforceProjectList", optionBox = True, c = partial(editProject, projectName))
#open up the projectSettings.txt file and add an entry for current project
if os.path.exists(mayaToolsDir + "/General/Scripts/projectSettings.txt"):
f = open(mayaToolsDir + "/General/Scripts/projectSettings.txt", 'r')
oldSettings = cPickle.load(f)
useSourceControl = oldSettings.get("UseSourceControl")
f.close()
#write out new settings
settings = {}
settings["UseSourceControl"] = useSourceControl
settings["CurrentProject"] = projectName
f = open(mayaToolsDir + "/General/Scripts/projectSettings.txt", 'w')
cPickle.dump(settings, f)
f.close()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def setCurrentProject(projectName, *args):
#get access to maya tools path
toolsPath = cmds.internalVar(usd = True) + "mayaTools.txt"
if os.path.exists(toolsPath):
f = open(toolsPath, 'r')
mayaToolsDir = f.readline()
f.close()
#re-write settings
if os.path.exists(mayaToolsDir + "/General/Scripts/projectSettings.txt"):
f = open(mayaToolsDir + "/General/Scripts/projectSettings.txt", 'r')
oldSettings = cPickle.load(f)
useSourceControl = oldSettings.get("UseSourceControl")
f.close()
#write out new settings
settings = {}
settings["UseSourceControl"] = useSourceControl
settings["CurrentProject"] = projectName
f = open(mayaToolsDir + "/General/Scripts/projectSettings.txt", 'w')
cPickle.dump(settings, f)
f.close()
| bsd-2-clause | -8,773,135,227,386,069,000 | 14.551724 | 316 | 0.561792 | false |
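A hedged sketch of how the helpers above are typically called; they expect to run inside Maya with P4Python and a configured workspace, and passing None makes them operate on the currently open scene.

import perforceUtils

perforceUtils.p4_getLatestRevision(None)       # sync the scene and its references
if perforceUtils.p4_checkOutCurrentFile(None):
    # ... edit the scene ...
    perforceUtils.p4_submitCurrentFile(None, "Tweaked rig controls")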
priomsrb/vimswitch | vimswitch/UpdateProfileAction.py | 1 | 1247 | from .Action import Action
from .Settings import getSettings
from .SwitchProfileAction import createSwitchProfileAction
class UpdateProfileAction(Action):
def __init__(self, settings, switchProfileAction):
Action.__init__(self)
self.settings = settings
self.switchProfileAction = switchProfileAction
self.profile = None
def execute(self):
self.profile = self._getProfile()
if self.profile == self.settings.defaultProfile:
print('Cannot update default profile')
self.exitCode = -1
return
self.switchProfileAction.update = True
self.switchProfileAction.profile = self.profile
self.switchProfileAction.execute()
def _getProfile(self):
if self.profile is None:
if self.settings.currentProfile is None:
return self.settings.defaultProfile
else:
return self.settings.currentProfile
else:
return self.profile
def createUpdateProfileAction(app):
settings = getSettings(app)
switchProfileAction = createSwitchProfileAction(app)
updateProfileAction = UpdateProfileAction(settings, switchProfileAction)
return updateProfileAction
| gpl-2.0 | -8,137,162,348,852,659,000 | 30.974359 | 76 | 0.677626 | false |
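A hedged sketch of driving the action above; `app` stands for vimswitch's application object expected by the factory, and the profile string is a placeholder.

action = createUpdateProfileAction(app)
action.profile = 'someuser/vimrc'   # hypothetical profile identifier
action.execute()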
indico/indico | indico/modules/events/registration/models/invitations.py | 1 | 3543 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from uuid import uuid4
from sqlalchemy.dialects.postgresql import UUID
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.enum import RichIntEnum
from indico.util.i18n import L_
from indico.util.locators import locator_property
from indico.util.string import format_repr
class InvitationState(RichIntEnum):
__titles__ = [L_('Pending'), L_('Accepted'), L_('Declined')]
pending = 0
accepted = 1
declined = 2
class RegistrationInvitation(db.Model):
"""An invitation for someone to register."""
__tablename__ = 'invitations'
__table_args__ = (db.CheckConstraint('(state = {state}) OR (registration_id IS NULL)'
.format(state=InvitationState.accepted), name='registration_state'),
db.UniqueConstraint('registration_form_id', 'email'),
{'schema': 'event_registration'})
#: The ID of the invitation
id = db.Column(
db.Integer,
primary_key=True
)
#: The UUID of the invitation
uuid = db.Column(
UUID,
index=True,
unique=True,
nullable=False,
default=lambda: str(uuid4())
)
#: The ID of the registration form
registration_form_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.forms.id'),
index=True,
nullable=False
)
#: The ID of the registration (if accepted)
registration_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.registrations.id'),
index=True,
unique=True,
nullable=True
)
#: The state of the invitation
state = db.Column(
PyIntEnum(InvitationState),
nullable=False,
default=InvitationState.pending
)
#: Whether registration moderation should be skipped
skip_moderation = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: The email of the invited person
email = db.Column(
db.String,
nullable=False
)
#: The first name of the invited person
first_name = db.Column(
db.String,
nullable=False
)
#: The last name of the invited person
last_name = db.Column(
db.String,
nullable=False
)
#: The affiliation of the invited person
affiliation = db.Column(
db.String,
nullable=False
)
#: The associated registration
registration = db.relationship(
'Registration',
lazy=True,
backref=db.backref(
'invitation',
lazy=True,
uselist=False
)
)
# relationship backrefs:
# - registration_form (RegistrationForm.invitations)
@locator_property
def locator(self):
return dict(self.registration_form.locator, invitation_id=self.id)
@locator.uuid
def locator(self):
"""A locator suitable for 'display' pages.
Instead of the numeric ID it uses the UUID.
"""
assert self.uuid is not None
return dict(self.registration_form.locator, invitation=self.uuid)
def __repr__(self):
full_name = f'{self.first_name} {self.last_name}'
return format_repr(self, 'id', 'registration_form_id', 'email', 'state', _text=full_name)
| mit | -4,382,065,804,384,135,000 | 27.344 | 109 | 0.61558 | false |
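A hedged query sketch for the model above, run inside an Indico application context with Flask-SQLAlchemy; `regform` is assumed to be an existing RegistrationForm.

pending = (RegistrationInvitation.query
           .filter_by(registration_form_id=regform.id,
                      state=InvitationState.pending)
           .all())
for inv in pending:
    print(inv.first_name, inv.last_name, inv.email)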
ajroussel/shell-nouns-data | src/extractor.py | 1 | 3851 | #!/usr/bin/env python3
import os
import argparse
import pickle
from lxml import etree
from sys import argv
from objects import *
## returns a list of ints
def to_index(s):
outlist = list()
spl1 = s.split(',')
try:
for item in spl1:
spl2 = item.split('..')
start = int(spl2[0].split('_')[1])
end = int(spl2[1].split('_')[1]) if len(spl2) > 1 else start
outlist.extend([i - 1 for i in range(start, end + 1)])
except ValueError:
print(s)
return outlist
def get_SNs(node):
snes = list()
try:
for sn in node.find("shellnouns").iter("shellnoun"):
snes.append((sn.get("content_phrases"),
to_index(sn.get("span")),
sn.get("value")))
except AttributeError:
pass
return snes
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("inputfiles", type=str, nargs='+',
help="xml input files")
ap.add_argument("-o", "--outputfile", type=str, default="sn_data.pickle",
help="name of output pickle")
ap.add_argument("-a", "--annotated", action="store_true",
help="use if xml files are annotated w/SN info")
userargs = ap.parse_args()
i = 0
corpus = Corpus()
for fname in userargs.inputfiles:
docroot = etree.parse(fname).getroot()
myname, ext = os.path.splitext(fname)
print("processing", myname + "...")
session_start = i
for turn in docroot.iter("turn"):
turn_start = i
mylang = "de" if "de" in turn.get("turn_id") else "en"
for sentence in turn.iter("sent"):
sent_start = i
for tok in sentence.iter("tok"):
corpus.tokens.append(Token(tok.text, tok.attrib, i,
mylang, session_start))
i += 1
sent_end = i
corpus.sentences.append(range(sent_start, sent_end))
turn_end = i
corpus.turns.append(range(turn_start, turn_end))
session_end = i
corpus.sessions.append(range(session_start, session_end))
if userargs.annotated:
# dict: CP id -> Antecedent
cps = dict()
for cp in docroot.find("content_phrases").iter("content_phrase"):
cp_indices = to_index(cp.get("span"))
is_nom = cp.get("nominal")
new_ante = Antecedent([corpus.tokens[x + session_start]
for x in cp_indices],
is_nom)
corpus.antecedents.append(new_ante)
cps[cp.get("id")] = new_ante
# list[tuples] "proto-Anaphor"
snes = get_SNs(docroot)
for cp_key, sn_indices, val in snes:
my_anaphor = Anaphor([corpus.tokens[x + session_start]
for x in sn_indices],
val)
corpus.anaphors.append(my_anaphor)
my_antecedents = list()
for key in cp_key.split(";"):
try:
my_antecedents.append(cps[key])
except KeyError:
pass
my_instance = RefInstance(my_anaphor, *my_antecedents)
# only keep (non-empty) entries
if my_instance.antecedents:
corpus.ref_instances.append(my_instance)
with open(userargs.outputfile, 'wb') as outfile:
print("read corpus with", len(corpus.tokens), "tokens...")
pickle.dump(corpus, outfile)
print("done!")
| gpl-3.0 | 7,349,968,066,567,672,000 | 32.780702 | 77 | 0.491041 | false |
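A worked illustration of the span parsing done by to_index above; the span string is hypothetical but follows the word_<n> format the code expects.

# "word_3..word_5,word_9" -> tokens 3-5 plus token 9, shifted to 0-based indices
print(to_index("word_3..word_5,word_9"))  # [2, 3, 4, 8]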
CybOXProject/python-cybox | cybox/__init__.py | 1 | 4103 | # Copyright (c) 2020, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox.vendor import six
from .version import __version__ # noqa
#: Mapping of xsi:types to implementation/extension classes
_EXTENSION_MAP = {}
def _lookup_unprefixed(typename):
"""Attempts to resolve a class for the input XML type `typename`.
Args:
        typename: The name of a CybOX XML type (e.g., UnixProcessStatusType)
without a namespace prefix.
Returns:
A stix.Entity implementation class for the `typename`.
Raises:
ValueError: If no class has been registered for the input `typename`.
"""
for xsi_type, klass in six.iteritems(_EXTENSION_MAP):
if typename in xsi_type:
return klass
error = "Unregistered extension type: %s" % typename
raise ValueError(error)
def _lookup_extension(xsi_type):
"""Returns a Python class for the `xsi_type` value.
Args:
xsi_type: An xsi:type value string.
Returns:
An Entity implementation class for the `xsi_type`.
Raises:
ValueError: If no class has been registered for the `xsi_type`.
"""
if xsi_type in _EXTENSION_MAP:
return _EXTENSION_MAP[xsi_type]
raise ValueError("Unregistered xsi:type %s" % xsi_type)
def lookup_extension(typeinfo, default=None):
    """Returns an Entity class that has been registered for the
`typeinfo` value.
Note:
This is for internal use only.
Args:
typeinfo: An object or string containing type information. This can be
either an xsi:type attribute value or a stix.bindings object.
default: Return class if typeinfo is None or contains no xml type
information.
Returns:
An Entity implementation class for the `xsi_type`.
Raises:
ValueError: If no class has been registered for the `xsi_type`.
"""
if typeinfo is None and default:
return default
# If the `typeinfo` was a string, consider it a full xsi:type value.
if isinstance(typeinfo, six.string_types):
return _lookup_extension(typeinfo)
# Most extension bindings include this attribute.
if not hasattr(typeinfo, 'xml_type'):
if default:
return default
error = "Input %s is missing xml_type attribute. Cannot lookup class."
raise ValueError(error % type(typeinfo))
# Extension binding classes usually (always?) have an `xmlns_prefix`
# class attribute.
if hasattr(typeinfo, 'xmlns_prefix'):
xsi_type = "%s:%s" % (typeinfo.xmlns_prefix, typeinfo.xml_type)
return _lookup_extension(xsi_type)
# no xmlns_prefix found, try to resolve the class by just the `xml_type`
return _lookup_unprefixed(typeinfo.xml_type)
def add_extension(cls):
"""Registers an Entity class as an implementation of an xml type.
Classes must have an ``_XSI_TYPE`` class attributes to be registered. The
value of this attribute must be a valid xsi:type.
Note:
This was designed for internal use.
"""
_EXTENSION_MAP[cls._XSI_TYPE] = cls # noqa
def register_extension(cls):
"""Class decorator for registering a stix.Entity class as an implementation
of an xml type.
Classes must have an ``_XSI_TYPE`` class attributes to be registered.
Note:
This was designed for internal use.
"""
add_extension(cls)
return cls
# TODO: Should this get moved to mixbox or not?
class Unicode(entities.Entity):
"""Shim class to allow xs:string's in EntityList"""
def __init__(self, value):
super(Unicode, self).__init__()
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = six.text_type(value)
def to_obj(self, ns_info=None):
return self.value
def to_dict(self):
return self.value
@classmethod
def from_obj(cls, cls_obj):
return cls(cls_obj)
from_dict = from_obj
| bsd-3-clause | -1,116,159,880,867,913,500 | 25.816993 | 79 | 0.654155 | false |
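A sketch of the register/lookup flow implemented above; the class and its xsi:type value are invented for illustration.

import cybox
from mixbox import entities

@cybox.register_extension
class DemoObjectType(entities.Entity):
    _XSI_TYPE = "demoNS:DemoObjectType"

assert cybox.lookup_extension("demoNS:DemoObjectType") is DemoObjectType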
pgmillon/ansible | lib/ansible/modules/cloud/vmware/vmware_host_facts.py | 1 | 11491 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Wei Gao <[email protected]>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_facts
short_description: Gathers facts about remote ESXi hostsystem
description:
- This module can be used to gather facts like CPU, memory, datastore, network and system information about an ESXi host system.
- Please specify hostname or IP address of ESXi host system as C(hostname).
- If hostname or IP address of vCenter is provided as C(hostname) and C(esxi_hostname) is not specified, then the
module will throw an error.
- VSAN facts added in 2.7 version.
version_added: 2.5
author:
- Wei Gao (@woshihaoren)
requirements:
- python >= 2.6
- PyVmomi
options:
esxi_hostname:
description:
- ESXi hostname.
- Host facts about the specified ESXi server will be returned.
- By specifying this option, you can select which ESXi hostsystem is returned if connecting to a vCenter.
version_added: 2.8
show_tag:
description:
- Tags related to Host are shown if set to C(True).
default: False
type: bool
required: False
version_added: 2.9
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vmware host facts
vmware_host_facts:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
register: host_facts
delegate_to: localhost
- name: Gather vmware host facts from vCenter
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: "{{ esxi_hostname }}"
register: host_facts
delegate_to: localhost
- name: Gather vmware host facts from vCenter with tag information
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: "{{ esxi_hostname }}"
show_tag: True
register: host_facts_tag
delegate_to: localhost
- name: Get VSAN Cluster UUID from host facts
vmware_host_facts:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
register: host_facts
- set_fact:
cluster_uuid: "{{ host_facts['ansible_facts']['vsan_cluster_uuid'] }}"
'''
RETURN = r'''
ansible_facts:
description: system info about the host machine
returned: always
type: dict
sample:
{
"ansible_all_ipv4_addresses": [
"10.76.33.200"
],
"ansible_bios_date": "2011-01-01T00:00:00+00:00",
"ansible_bios_version": "0.5.1",
"ansible_datastore": [
{
"free": "11.63 GB",
"name": "datastore1",
"total": "12.50 GB"
}
],
"ansible_distribution": "VMware ESXi",
"ansible_distribution_build": "4887370",
"ansible_distribution_version": "6.5.0",
"ansible_hostname": "10.76.33.100",
"ansible_interfaces": [
"vmk0"
],
"ansible_memfree_mb": 2702,
"ansible_memtotal_mb": 4095,
"ansible_os_type": "vmnix-x86",
"ansible_processor": "Intel Xeon E312xx (Sandy Bridge)",
"ansible_processor_cores": 2,
"ansible_processor_count": 2,
"ansible_processor_vcpus": 2,
"ansible_product_name": "KVM",
"ansible_product_serial": "NA",
"ansible_system_vendor": "Red Hat",
"ansible_vmk0": {
"device": "vmk0",
"ipv4": {
"address": "10.76.33.100",
"netmask": "255.255.255.0"
},
"macaddress": "52:54:00:56:7d:59",
"mtu": 1500
},
"vsan_cluster_uuid": null,
"vsan_node_uuid": null,
"vsan_health": "unknown",
"tags": [
{
"category_id": "urn:vmomi:InventoryServiceCategory:8eb81431-b20d-49f5-af7b-126853aa1189:GLOBAL",
"category_name": "host_category_0001",
"description": "",
"id": "urn:vmomi:InventoryServiceTag:e9398232-46fd-461a-bf84-06128e182a4a:GLOBAL",
"name": "host_tag_0001"
}
],
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.vmware_rest_client import VmwareRestClient
try:
from com.vmware.vapi.std_client import DynamicID
except ImportError:
pass
class VMwareHostFactManager(PyVmomi):
def __init__(self, module):
super(VMwareHostFactManager, self).__init__(module)
esxi_host_name = self.params.get('esxi_hostname', None)
if self.is_vcenter():
if esxi_host_name is None:
self.module.fail_json(msg="Connected to a vCenter system without specifying esxi_hostname")
self.host = self.get_all_host_objs(esxi_host_name=esxi_host_name)
if len(self.host) > 1:
self.module.fail_json(msg="esxi_hostname matched multiple hosts")
self.host = self.host[0]
else:
self.host = find_obj(self.content, [vim.HostSystem], None)
if self.host is None:
self.module.fail_json(msg="Failed to find host system.")
def all_facts(self):
ansible_facts = {}
ansible_facts.update(self.get_cpu_facts())
ansible_facts.update(self.get_memory_facts())
ansible_facts.update(self.get_datastore_facts())
ansible_facts.update(self.get_network_facts())
ansible_facts.update(self.get_system_facts())
ansible_facts.update(self.get_vsan_facts())
ansible_facts.update(self.get_cluster_facts())
if self.params.get('show_tag'):
ansible_facts.update(self.get_tag_facts())
self.module.exit_json(changed=False, ansible_facts=ansible_facts)
def get_cluster_facts(self):
cluster_facts = {'cluster': None}
if self.host.parent and isinstance(self.host.parent, vim.ClusterComputeResource):
cluster_facts.update(cluster=self.host.parent.name)
return cluster_facts
def get_vsan_facts(self):
config_mgr = self.host.configManager.vsanSystem
if config_mgr is None:
return {
'vsan_cluster_uuid': None,
'vsan_node_uuid': None,
'vsan_health': "unknown",
}
status = config_mgr.QueryHostStatus()
return {
'vsan_cluster_uuid': status.uuid,
'vsan_node_uuid': status.nodeUuid,
'vsan_health': status.health,
}
def get_cpu_facts(self):
return {
'ansible_processor': self.host.summary.hardware.cpuModel,
'ansible_processor_cores': self.host.summary.hardware.numCpuCores,
'ansible_processor_count': self.host.summary.hardware.numCpuPkgs,
'ansible_processor_vcpus': self.host.summary.hardware.numCpuThreads,
}
def get_memory_facts(self):
return {
'ansible_memfree_mb': self.host.hardware.memorySize // 1024 // 1024 - self.host.summary.quickStats.overallMemoryUsage,
'ansible_memtotal_mb': self.host.hardware.memorySize // 1024 // 1024,
}
def get_datastore_facts(self):
facts = dict()
facts['ansible_datastore'] = []
for store in self.host.datastore:
_tmp = {
'name': store.summary.name,
'total': bytes_to_human(store.summary.capacity),
'free': bytes_to_human(store.summary.freeSpace),
}
facts['ansible_datastore'].append(_tmp)
return facts
def get_network_facts(self):
facts = dict()
facts['ansible_interfaces'] = []
facts['ansible_all_ipv4_addresses'] = []
for nic in self.host.config.network.vnic:
device = nic.device
facts['ansible_interfaces'].append(device)
facts['ansible_all_ipv4_addresses'].append(nic.spec.ip.ipAddress)
_tmp = {
'device': device,
'ipv4': {
'address': nic.spec.ip.ipAddress,
'netmask': nic.spec.ip.subnetMask,
},
'macaddress': nic.spec.mac,
'mtu': nic.spec.mtu,
}
facts['ansible_' + device] = _tmp
return facts
def get_system_facts(self):
sn = 'NA'
for info in self.host.hardware.systemInfo.otherIdentifyingInfo:
if info.identifierType.key == 'ServiceTag':
sn = info.identifierValue
facts = {
'ansible_distribution': self.host.config.product.name,
'ansible_distribution_version': self.host.config.product.version,
'ansible_distribution_build': self.host.config.product.build,
'ansible_os_type': self.host.config.product.osType,
'ansible_system_vendor': self.host.hardware.systemInfo.vendor,
'ansible_hostname': self.host.summary.config.name,
'ansible_product_name': self.host.hardware.systemInfo.model,
'ansible_product_serial': sn,
'ansible_bios_date': self.host.hardware.biosInfo.releaseDate,
'ansible_bios_version': self.host.hardware.biosInfo.biosVersion,
}
return facts
def get_tag_facts(self):
vmware_client = VmwareRestClient(self.module)
host_dynamic_obj = DynamicID(type='HostSystem', id=self.host._moId)
self.tag_service = vmware_client.api_client.tagging.Tag
self.tag_association_svc = vmware_client.api_client.tagging.TagAssociation
self.category_service = vmware_client.api_client.tagging.Category
facts = {
'tags': self.get_tags_for_object(host_dynamic_obj)
}
return facts
def get_tags_for_object(self, dobj):
"""
Return tags associated with an object
Args:
dobj: Dynamic object
Returns: List of tags associated with the given object
"""
tag_ids = self.tag_association_svc.list_attached_tags(dobj)
tags = []
for tag_id in tag_ids:
tag_obj = self.tag_service.get(tag_id)
tags.append({
'id': tag_obj.id,
'category_name': self.category_service.get(tag_obj.category_id).name,
'name': tag_obj.name,
'description': tag_obj.description,
'category_id': tag_obj.category_id,
})
return tags
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
esxi_hostname=dict(type='str', required=False),
show_tag=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
vm_host_manager = VMwareHostFactManager(module)
vm_host_manager.all_facts()
if __name__ == '__main__':
main()
| gpl-3.0 | 7,889,792,393,098,098,000 | 33.927052 | 130 | 0.597685 | false |
mlsecproject/gglsbl-rest | config.py | 1 | 1443 | from os import environ
import logging.config
from apscheduler.schedulers.background import BackgroundScheduler
from multiprocessing import cpu_count
from subprocess import Popen
logging.config.fileConfig('logging.conf')
bind = "0.0.0.0:5000"
workers = int(environ.get('WORKERS', cpu_count() * 8 + 1))
timeout = int(environ.get('TIMEOUT', 120))
access_log_format = '%(h)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" "%({X-Forwarded-For}i)s" "%({X-Forwarded-Port}i)s" "%({X-Forwarded-Proto}i)s" "%({X-Amzn-Trace-Id}i)s"'
max_requests = int(environ.get('MAX_REQUESTS', 16384))
limit_request_line = int(environ.get('LIMIT_REQUEST_LINE', 8190))
keepalive = int(environ.get('KEEPALIVE', 60))
log = logging.getLogger(__name__)
def update():
log.info("Starting update process...")
po = Popen("python3 update.py", shell=True)
log.info("Update started as PID %d", po.pid)
rc = po.wait()
log.info("Update process finished with status code %d", rc)
sched = None
def on_starting(server):
log.info("Initial database load...")
po = Popen("python3 update.py", shell=True)
log.info("Update started as PID %d", po.pid)
rc = po.wait()
log.info("Update process finished with status code %d", rc)
log.info("Starting scheduler...")
global sched
sched = BackgroundScheduler(timezone="UTC")
sched.start()
sched.add_job(update, id="update", coalesce=True, max_instances=1, trigger='interval', minutes=30)
| apache-2.0 | -1,130,091,579,888,611,300 | 33.357143 | 167 | 0.677755 | false |
estaban/pyload | module/plugins/hoster/UnrestrictLi.py | 1 | 4420 | # -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
import re
from datetime import datetime, timedelta
from module.plugins.Hoster import Hoster
from module.common.json_layer import json_loads
def secondsToMidnight(gmt=0):
now = datetime.utcnow() + timedelta(hours=gmt)
if now.hour is 0 and now.minute < 10:
midnight = now
else:
midnight = now + timedelta(days=1)
midnight = midnight.replace(hour=0, minute=10, second=0, microsecond=0)
return int((midnight - now).total_seconds())
class UnrestrictLi(Hoster):
__name__ = "UnrestrictLi"
__version__ = "0.12"
__type__ = "hoster"
__pattern__ = r'https?://(?:[^/]*\.)?(unrestrict|unr)\.li'
__description__ = """Unrestrict.li hoster plugin"""
__author_name__ = "stickell"
__author_mail__ = "[email protected]"
def setup(self):
self.chunkLimit = 16
self.resumeDownload = True
def process(self, pyfile):
if re.match(self.__pattern__, pyfile.url):
new_url = pyfile.url
elif not self.account:
self.logError(_("Please enter your %s account or deactivate this plugin") % "Unrestrict.li")
self.fail("No Unrestrict.li account provided")
else:
self.logDebug("Old URL: %s" % pyfile.url)
for _ in xrange(5):
page = self.req.load('https://unrestrict.li/unrestrict.php',
post={'link': pyfile.url, 'domain': 'long'})
self.logDebug("JSON data: " + page)
if page != '':
break
else:
self.logInfo("Unable to get API data, waiting 1 minute and retry")
self.retry(5, 60, "Unable to get API data")
if 'Expired session' in page or ("You are not allowed to "
"download from this host" in page and self.premium):
self.account.relogin(self.user)
self.retry()
elif "File offline" in page:
self.offline()
elif "You are not allowed to download from this host" in page:
self.fail("You are not allowed to download from this host")
elif "You have reached your daily limit for this host" in page:
self.logWarning("Reached daily limit for this host")
self.retry(5, secondsToMidnight(gmt=2), "Daily limit for this host reached")
elif "ERROR_HOSTER_TEMPORARILY_UNAVAILABLE" in page:
self.logInfo("Hoster temporarily unavailable, waiting 1 minute and retry")
self.retry(5, 60, "Hoster is temporarily unavailable")
page = json_loads(page)
new_url = page.keys()[0]
self.api_data = page[new_url]
if new_url != pyfile.url:
self.logDebug("New URL: " + new_url)
if hasattr(self, 'api_data'):
self.setNameSize()
self.download(new_url, disposition=True)
if self.getConfig("history"):
self.load("https://unrestrict.li/history/&delete=all")
self.logInfo("Download history deleted")
def setNameSize(self):
if 'name' in self.api_data:
self.pyfile.name = self.api_data['name']
if 'size' in self.api_data:
self.pyfile.size = self.api_data['size']
| gpl-3.0 | 7,935,763,041,935,266,000 | 43.646465 | 104 | 0.541629 | false |
JanSchulz/multi-package-feedstock | recipes/python-2.7.8/run_test.py | 1 | 1289 | import platform
import os
import sys
import subprocess
from pprint import pprint
print('Python version:', platform.python_version())
print('max unicode:', sys.maxunicode)
print('architecture:', platform.architecture())
print('sys.version:', sys.version)
print('platform.machine():', platform.machine())
import _bisect
import _codecs_cn
import _codecs_hk
import _codecs_iso2022
import _codecs_jp
import _codecs_kr
import _codecs_tw
import _collections
import _csv
import _ctypes
import _ctypes_test
import _elementtree
import _functools
import _hashlib
import _heapq
import _hotshot
import _io
import _json
import _locale
import _lsprof
import _multibytecodec
import _multiprocessing
import _random
import _socket
import _sqlite3
import _ssl
import _struct
import _testcapi
import array
import audioop
import binascii
import bz2
import cPickle
import cStringIO
import cmath
import datetime
import future_builtins
import itertools
import math
import mmap
import operator
import parser
import pyexpat
#import resource
import select
import strop
#import syslog
import time
import unicodedata
import zlib
import gzip
from os import urandom
if sys.platform != 'win32':
import crypt
import fcntl
import grp
import nis
import ssl
print('OPENSSL_VERSION:', ssl.OPENSSL_VERSION)
| bsd-3-clause | -6,962,077,765,345,530,000 | 16.657534 | 51 | 0.794414 | false |
mikeh69/JammerDetect | src/audio_tones.py | 1 | 3644 | import math #import needed modules
import pyaudio #sudo apt-get install python-pyaudio
import struct
import pickle
from time import sleep
PyAudio = pyaudio.PyAudio #initialize pyaudio
#See http://en.wikipedia.org/wiki/Bit_rate#Audio
BITRATE = 48000 #number of frames per second - 44.1kHz does not work properly on RPi BCM2538!
LENGTH = 0.2 #seconds to play sound
CHUNKSIZE = int(BITRATE * LENGTH)
WAVEDATA_FILE = "/home/pi/wavedata.pickled"
class AudioTones:
def init(self):
self.player = PyAudio()
defaultCapability = self.player.get_default_host_api_info()
print("Player defaults:")
print(defaultCapability)
# fmt = self.player.get_format_from_width(2)
# fmt = pyaudio.paInt8 # supposedly 8-bit signed-integer
fmt = pyaudio.paInt16 # 16-bit signed-integer
print(self.player.is_format_supported(output_format = fmt, output_channels = 1,
rate = BITRATE, output_device = 3))
self.stream = self.player.open(format = fmt, channels = 1, rate = BITRATE, output = True, frames_per_buffer = CHUNKSIZE)
try:
print("Trying to load wavedata from file...")
f = open(WAVEDATA_FILE, "rb")
print(" File opened OK")
self.WAVEDATA = pickle.load(f)
print(" Wavedata read from file OK")
f.close()
return
except Exception as ex:
print(ex)
print("Failed to load wavedata from file, re-generating")
frequency = 200.0 # start frequency 200Hz
self.WAVEDATA = []
for index in range(0, 46): # valid index range 0 - 45, ~10log(32768)
num_fadein_frames = int(BITRATE * LENGTH * 0.05)
num_loud_frames = int(BITRATE * LENGTH * 0.7)
num_fadeout_frames = CHUNKSIZE - (num_loud_frames + num_fadein_frames)
self.WAVEDATA.append(struct.pack( "<H", 0 ))
for xx in range(num_fadein_frames):
x = xx
next_sample = int(math.sin(x/((BITRATE/frequency)/math.pi)) * 32000 * (xx/num_fadein_frames))
self.WAVEDATA[index] = self.WAVEDATA[index] + struct.pack( "<h", next_sample ) # little-endian int16
for xx in range(num_loud_frames):
x = xx + num_fadein_frames
next_sample = int(math.sin(x/((BITRATE/frequency)/math.pi)) * 32000)
self.WAVEDATA[index] = self.WAVEDATA[index] + struct.pack( "<h", next_sample ) # little-endian int16
for xx in range(num_fadeout_frames):
x = xx + num_loud_frames + num_fadein_frames
next_sample = int(math.sin(x/((BITRATE/frequency)/math.pi)) * 32000 * (1 - (xx/num_fadeout_frames)))
# next_sample = 0
self.WAVEDATA[index] = self.WAVEDATA[index] + struct.pack( "<h", next_sample)
frequency *= 1.0594631 # semitone ratio
# Save the newly-generated data to a file using Pickle:
print("Saving wavedata to file")
f = open(WAVEDATA_FILE, "wb")
pickle.dump(self.WAVEDATA, f)
f.close()
def test(self):
for index in range(0, 40):
self.stream.write(self.WAVEDATA[index])
index += 1
self.stream.stop_stream()
def play(self, index):
self.stream.write(self.WAVEDATA[index])
def close(self):
self.stream.stop_stream()
self.stream.close()
self.player.terminate()
if __name__ == "__main__":
tones = AudioTones()
tones.init()
for i in range(0, 40):
tones.play(i)
sleep(0.3)
| mit | -6,694,274,719,032,910,000 | 37.357895 | 128 | 0.586718 | false |
mirkobronzi/finance-analyzer | lib/entries.py | 1 | 3600 | """
classes Entry and Entries
"""
__author__ = 'bronzi'
from datetime import datetime
import re
#TODO: should be configurable
PUNCTUATION_REMOVER = re.compile("[0-9,\.#\-_/']")
SPACE_REMOVER = re.compile(" +")
def string_to_float(string):
"""
simply convert a string into a float
string : basestring
"""
return 0.0 if string.strip() == '' else float(string.replace(',', '.'))
class Entry:
"""
classes Entry - representing an expense/income entry
"""
def __init__(self, date, name, money_out=0.0, money_in=0.0,
comments=None):
self.date = date
self.name = name
self.money_in = money_in
self.money_out = money_out
self.comments = comments if comments else {}
self.normalized_name = Entry._normalize_entry(name)
@staticmethod
def parse(date, name, money_out='0', money_in='0', comments=None):
"""
method to parse strings and convert them into an Entry object
(all parameter are basestring)
"""
parsed_date = datetime.strptime(date, "%m/%d/%Y").date()
parsed_comments = [] if not comments else\
[x.strip().split(':') for x in comments.split(',')]
fixed_parsed_comments = map(
lambda x : x if len(x) > 1 else (x[0], ''),
parsed_comments)
return Entry(parsed_date, name, string_to_float(money_out),
string_to_float(money_in), dict(fixed_parsed_comments))
def as_tuple(self):
return (self.date, self.name, self.money_in, self.money_out,
self.comments)
@staticmethod
def _normalize_entry(name):
normalized_name = re.sub(PUNCTUATION_REMOVER, "", name)
normalized_name = re.sub(SPACE_REMOVER, " ", normalized_name)
return normalized_name.strip()
def __eq__(self, other):
return (self.date == other.date and
self.name == other.name and
self.money_in == other.money_in and
self.money_out == other.money_out and
self.comments == other.comments)
def __hash__(self):
# TODO: check a better way to implement this
# try to generate a frozen dictionary from the beginning
comments_hash = hash(frozenset(self.comments.keys())) +\
hash(frozenset(self.comments.values()))
return hash(self.date) +\
hash(self.name) +\
hash(self.money_in) +\
hash(self.money_out) +\
comments_hash
def __repr__(self):
return (str(self.date) + ' : ' + self.name + ' => (' +
str(self.money_in) + ', -' + str(self.money_out) + ') [' +
str(self.comments) + ']')
class Entries:
"""
classes Entries - representing a collection of Entry
"""
def __init__(self, *args):
self.entries = list(args)
def add(self, entry):
"""
method to add an Entry to the collection
entry : Entry
"""
self.entries.append(entry)
def extend(self, entries):
"""
method to merge the given Entries into this one
entries : Entries
"""
self.entries.extend(entries.entries)
def sorted(self):
entries_as_tuple = [entry.as_tuple() for entry in self.entries]
to_return = sorted(entries_as_tuple)
return Entries(*to_return)
def __eq__(self, other):
return self.entries == other.entries
def __repr__(self):
return str(self.entries)
def __iter__(self):
return iter(self.entries)
| gpl-3.0 | -7,055,621,584,990,460,000 | 29 | 76 | 0.563889 | false |
simone-campagna/sheru | packages/sheru/main.py | 1 | 5020 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'main',
]
import argparse
import traceback
import sys
from . import conf
from . import log
from .stream_printer import StreamPrinter
from .sheru_config import SheruConfig
def action_init(logger, printer):
from .action.init import init
sheru_config_file = conf.get_sheru_config_file()
init(sheru_config_file=sheru_config_file, logger=logger, printer=printer)
def do_action(action, logger, printer, **args):
sheru_config_file = conf.get_sheru_config_file()
sheru_config = SheruConfig(sheru_config_file)
if not 'sheru' in sheru_config:
logger.error("config file {!r} does not contain a valid sheru configuration".format(sheru_config_file))
return 2
sheru = sheru_config['sheru']
action(sheru=sheru, logger=logger, printer=printer, **args)
def action_list(logger, printer):
from .action.list import list_sheru
return do_action(list_sheru, logger=logger, printer=printer)
def action_show(logger, printer):
from .action.show import show_sheru
return do_action(show_sheru, logger=logger, printer=printer)
def action_load(logger, printer, profile):
from .action.load import load_profile
return do_action(load_profile, logger=logger, printer=printer, profile_name=profile)
def main(printer=StreamPrinter(sys.stdout), logger=None, argv=None):
if argv is None:
argv = sys.argv[1:]
if logger is None:
logger = log.get_default_logger()
default_sheru_directory = conf.get_sheru_directory()
default_sheru_config_file = conf.get_sheru_config_file()
common_parser = argparse.ArgumentParser(
add_help=False,
)
common_parser.add_argument("--dir", "-d",
dest="sheru_directory",
default=None,
help="set sheru home directory, containing sheru config files [{!r}]".format(default_sheru_directory))
common_parser.add_argument("--config", "-c",
dest="sheru_config_file",
default=None,
help="set sheru config file [{!r}]".format(default_sheru_config_file))
common_parser.add_argument("--verbose", "-v",
dest="verbose_level",
action="count",
default=0,
help="increase verbose level")
common_parser.add_argument("--dry-run", "-D",
default=False,
action="store_true",
help="dry run mode")
common_parser.add_argument("--trace", "-t",
default=False,
action="store_true",
help="trace errors")
common_parser.add_argument('--version',
action='version',
version='%(prog)s {}'.format(conf.VERSION),
help='show version and exit')
top_level_parser = argparse.ArgumentParser(
parents=[common_parser],
description="""\
Sheru - manage remote shell profiles
""",
)
subparsers = top_level_parser.add_subparsers()
init_parser = subparsers.add_parser("init",
parents=[common_parser],
description="init sheru rc dir")
init_parser.set_defaults(
action=action_init,
action_args=[])
show_parser = subparsers.add_parser("show",
parents=[common_parser],
description="show available profiles")
show_parser.set_defaults(
action=action_show,
action_args=[])
list_parser = subparsers.add_parser("list",
parents=[common_parser],
description="list available profiles")
list_parser.set_defaults(
action=action_list,
action_args=[])
load_parser = subparsers.add_parser("load",
parents=[common_parser],
description="load available profiles")
load_parser.set_defaults(
action=action_load,
action_args=["profile"])
load_parser.add_argument("profile",
type=str,
help="profile name")
args = top_level_parser.parse_args(argv)
log.set_verbose_level(logger, args.verbose_level)
action = args.action
action_args = {arg_name: getattr(args, arg_name) for arg_name in args.action_args}
action_args['logger'] = logger
action_args['printer'] = printer
conf.setup(
sheru_directory=args.sheru_directory,
sheru_config_file=args.sheru_config_file)
try:
action(**action_args)
except Exception as err:
if args.trace:
traceback.print_exc()
logger.error("{}: {}\n".format(type(err).__name__, err))
return 3
| apache-2.0 | 8,380,608,132,489,608,000 | 29.424242 | 111 | 0.655378 | false |
Si-elegans/Web-based_GUI_Tools | spirit/forms/comment_like.py | 1 | 1042 | #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from spirit.models.comment_like import CommentLike
class LikeForm(forms.ModelForm):
class Meta:
model = CommentLike
fields = []
def __init__(self, user=None, comment=None, *args, **kwargs):
super(LikeForm, self).__init__(*args, **kwargs)
self.user = user
self.comment = comment
def clean(self):
cleaned_data = super(LikeForm, self).clean()
like = CommentLike.objects.filter(user=self.user,
comment=self.comment)
if like.exists():
# Do this since some of the unique_together fields are excluded.
raise forms.ValidationError(_("This like already exists"))
return cleaned_data
def save(self, commit=True):
if not self.instance.pk:
self.instance.user = self.user
self.instance.comment = self.comment
return super(LikeForm, self).save(commit) | apache-2.0 | 6,114,016,247,906,903,000 | 27.189189 | 76 | 0.606526 | false |
nttks/edx-platform | lms/djangoapps/pdfgen/tests/test_api.py | 1 | 6717 | """Tests for the pdfgen API. """
from datetime import datetime
from mock import patch
from django.test import RequestFactory
from certificates import api as certs_api
from certificates.models import CertificateGenerationConfiguration, CertificateStatuses, GeneratedCertificate
from certificates.tests.factories import GeneratedCertificateFactory
from config_models.models import cache
from openedx.core.djangoapps.ga_self_paced import api as self_paced_api
from openedx.core.djangoapps.ga_task.api import AlreadyRunningError
from openedx.core.djangoapps.ga_task.tests.test_task import TaskTestMixin
from pdfgen import api as pdfgen_api
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class GenerateUserCertificateTest(ModuleStoreTestCase, TaskTestMixin):
def setUp(self):
super(GenerateUserCertificateTest, self).setUp()
# Enable the certificate generation feature
cache.clear()
CertificateGenerationConfiguration.objects.create(enabled=True)
self.user = UserFactory.create()
self.course = self._create_course()
self.request = RequestFactory().request()
# Setup mock
patcher_is_course_closed = patch.object(self_paced_api, 'is_course_closed')
self.mock_is_course_closed = patcher_is_course_closed.start()
self.mock_is_course_closed.return_value = False
self.addCleanup(patcher_is_course_closed.stop)
patcher_submit_task = patch('pdfgen.api.submit_task')
self.mock_submit_task = patcher_submit_task.start()
self.addCleanup(patcher_submit_task.stop)
def _create_course(self, is_self_paced=True, **options):
course = CourseFactory.create(
self_paced=is_self_paced,
start=datetime(2013, 9, 16, 7, 17, 28),
grade_cutoffs={'Pass': 0.5},
**options
)
# Enable certificate generation for this course
certs_api.set_cert_generation_enabled(course.id, is_self_paced)
CourseEnrollmentFactory(user=self.user, course_id=course.id)
return course
def _create_task(self, course, student):
task_input = {
'course_id': unicode(course.id),
'student_ids': [student.id],
}
return self._create_input_entry(task_input=task_input)
def _create_cert(self, status):
return GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
status=status,
download_url='http://www.example.com/cert.pdf' if status == CertificateStatuses.downloadable else '',
mode='honor',
)
def _get_cert(self, course, student):
try:
return GeneratedCertificate.objects.get(course_id=course.id, user=student)
except GeneratedCertificate.DoesNotExist:
return None
def test_successful(self):
task = self._create_task(self.course, self.user)
self.mock_submit_task.return_value = task
self.assertEqual(
pdfgen_api.generate_user_certificate(self.request, self.user, self.course),
{
'success': True,
'message': "Certificate self-generation task(task_id={task_id}) has been started.".format(
task_id=task.id),
}
)
def test_check_error_course_is_not_self_paced(self):
not_self_paced_course = self._create_course(is_self_paced=False)
self.assertEqual(
pdfgen_api.generate_user_certificate(self.request, self.user, not_self_paced_course),
{
'success': False,
'message': "Couldn't submit a certificate self-generation task because the course is not self-paced.",
}
)
cert = self._get_cert(not_self_paced_course, self.user)
self.assertEqual(cert.course_id, not_self_paced_course.id)
self.assertEqual(cert.user, self.user)
self.assertEqual(cert.status, CertificateStatuses.error)
self.assertEqual(cert.error_reason, "This course is not self-paced.")
def test_check_error_enroll_has_expired(self):
self.mock_is_course_closed.return_value = True
self.assertEqual(
pdfgen_api.generate_user_certificate(self.request, self.user, self.course),
{
'success': False,
'message': "Couldn't submit a certificate self-generation task because student's enrollment has already expired.",
}
)
cert = self._get_cert(self.course, self.user)
self.assertEqual(cert.course_id, self.course.id)
self.assertEqual(cert.user, self.user)
self.assertEqual(cert.status, CertificateStatuses.error)
self.assertEqual(cert.error_reason, "Student's enrollment has already expired")
def test_check_error_cert_has_already_created(self):
before_cert = self._create_cert(CertificateStatuses.generating)
self.assertEqual(
pdfgen_api.generate_user_certificate(self.request, self.user, self.course),
{
'success': False,
'message': "Couldn't submit a certificate self-generation task because certificate status has already created."
}
)
cert = self._get_cert(self.course, self.user)
self.assertEqual(cert.course_id, before_cert.course_id)
self.assertEqual(cert.user, before_cert.user)
self.assertEqual(cert.status, before_cert.status)
self.assertEqual(cert.error_reason, before_cert.error_reason)
def test_already_running_error(self):
self.mock_submit_task.side_effect = AlreadyRunningError()
self.assertEqual(
pdfgen_api.generate_user_certificate(self.request, self.user, self.course),
{
'success': False,
'message': "Task is already running.",
}
)
def test_unexpected_error(self):
ex = Exception('a' * 1000)
self.mock_submit_task.side_effect = ex
self.assertEqual(
pdfgen_api.generate_user_certificate(self.request, self.user, self.course),
{
'success': False,
'message': "An unexpected error occurred.",
}
)
cert = self._get_cert(self.course, self.user)
self.assertEqual(cert.course_id, self.course.id)
self.assertEqual(cert.user, self.user)
self.assertEqual(cert.status, CertificateStatuses.error)
self.assertEqual(cert.error_reason, str(ex)[:512])
| agpl-3.0 | 3,364,932,985,641,381,400 | 39.957317 | 130 | 0.64895 | false |
vIiRuS/Lagerregal | users/forms.py | 1 | 1438 | from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from users.models import Lageruser, DepartmentUser
from Lagerregal import settings
class SettingsForm(forms.ModelForm):
error_css_class = 'has-error'
class Meta:
model = Lageruser
fields = ["pagelength", "timezone", "theme", "main_department"]
help_texts = {
"pagelength": _("The number of items displayed on one page in a list."),
"main_department": _("Your Main department determines, which department devices you create are assigned to."),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["timezone"].choices[0] = ("", ugettext("Default ({0})".format(settings.TIME_ZONE)))
self.fields["timezone"].widget.choices[0] = ("", ugettext("Default ({0})".format(settings.TIME_ZONE)))
class AvatarForm(forms.ModelForm):
error_css_class = 'has-error'
avatar_clear = forms.BooleanField(required=False)
class Meta:
model = Lageruser
fields = ["avatar"]
widgets = {
"avatar": forms.FileInput()
}
class DepartmentAddUserForm(forms.ModelForm):
error_css_class = 'has-error'
class Meta:
model = DepartmentUser
widgets = {
"department": forms.HiddenInput()
}
fields = '__all__'
| bsd-3-clause | 492,863,469,276,929,150 | 30.26087 | 122 | 0.626565 | false |
dmsimard/ansible | lib/ansible/plugins/filter/core.py | 1 | 21532 | # (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import glob
import hashlib
import json
import ntpath
import os.path
import re
import sys
import time
import uuid
import yaml
import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.six import string_types, integer_types, reraise, text_type
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common._collections_compat import Mapping
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.template import recursive_check_defined
from ansible.utils.display import Display
from ansible.utils.encrypt import passlib_or_crypt
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
display = Display()
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
default_flow_style = kw.pop('default_flow_style', None)
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, sort_keys=True, *args, **kw):
'''Make verbose, human readable JSON'''
return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(string, format)
def strftime(string_format, second=None):
''' return a date string using string. See https://docs.python.org/2/library/time.html#time.strftime for format '''
if second is not None:
try:
second = float(second)
except Exception:
raise AnsibleFilterError('Invalid value for epoch value (%s)' % second)
return time.strftime(string_format, time.localtime(second))
def quote(a):
''' return its argument quoted for shell usage '''
if a is None:
a = u''
return shlex_quote(to_text(a))
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [g for g in glob.glob(pathname) if os.path.isfile(g)]
def regex_replace(value='', pattern='', replacement='', ignorecase=False, multiline=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
def ternary(value, true_val, false_val, none_val=None):
''' value ? true_val : false_val '''
if value is None and none_val is not None:
return none_val
elif bool(value):
return true_val
else:
return false_val
def regex_escape(string, re_type='python'):
string = to_text(string, errors='surrogate_or_strict', nonstring='simplerepr')
'''Escape all regular expressions special characters from STRING.'''
if re_type == 'python':
return re.escape(string)
elif re_type == 'posix_basic':
# list of BRE special chars:
# https://en.wikibooks.org/wiki/Regular_Expressions/POSIX_Basic_Regular_Expressions
return regex_replace(string, r'([].[^$*\\])', r'\\\1')
# TODO: implement posix_extended
# It's similar to, but different from python regex, which is similar to,
# but different from PCRE. It's possible that re.escape would work here.
# https://remram44.github.io/regex-cheatsheet/regex.html#programs
elif re_type == 'posix_extended':
raise AnsibleFilterError('Regex type (%s) not yet implemented' % re_type)
else:
raise AnsibleFilterError('Invalid regex type (%s)' % re_type)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
def from_yaml_all(data):
if isinstance(data, string_types):
return yaml.safe_load_all(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except Exception:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try:
h = hashlib.new(hashtype)
except Exception as e:
# hash is not supported?
raise AnsibleFilterError(e)
h.update(to_bytes(data, errors='surrogate_or_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None, salt_size=None, rounds=None):
passlib_mapping = {
'md5': 'md5_crypt',
'blowfish': 'bcrypt',
'sha256': 'sha256_crypt',
'sha512': 'sha512_crypt',
}
hashtype = passlib_mapping.get(hashtype, hashtype)
try:
return passlib_or_crypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds)
except AnsibleError as e:
reraise(AnsibleFilterError, AnsibleFilterError(to_native(e), orig_exc=e), sys.exc_info()[2])
def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
uuid_namespace = namespace
if not isinstance(uuid_namespace, uuid.UUID):
try:
uuid_namespace = uuid.UUID(namespace)
except (AttributeError, ValueError) as e:
raise AnsibleFilterError("Invalid value '%s' for 'namespace': %s" % (to_native(namespace), to_native(e)))
# uuid.uuid5() requires bytes on Python 2 and bytes or text or Python 3
return to_text(uuid.uuid5(uuid_namespace, to_native(string, errors='surrogate_or_strict')))
def mandatory(a, msg=None):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
if a._undefined_name is not None:
name = "'%s' " % to_text(a._undefined_name)
else:
name = ''
if msg is not None:
raise AnsibleFilterError(to_native(msg))
else:
raise AnsibleFilterError("Mandatory variable %s not defined." % name)
return a
def combine(*terms, **kwargs):
recursive = kwargs.pop('recursive', False)
list_merge = kwargs.pop('list_merge', 'replace')
if kwargs:
raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments")
# allow the user to do `[dict1, dict2, ...] | combine`
dictionaries = flatten(terms, levels=1)
# recursively check that every elements are defined (for jinja2)
recursive_check_defined(dictionaries)
if not dictionaries:
return {}
if len(dictionaries) == 1:
return dictionaries[0]
# merge all the dicts so that the dict at the end of the array have precedence
# over the dict at the beginning.
# we merge the dicts from the highest to the lowest priority because there is
# a huge probability that the lowest priority dict will be the biggest in size
# (as the low prio dict will hold the "default" values and the others will be "patches")
# and merge_hash create a copy of it's first argument.
# so high/right -> low/left is more efficient than low/left -> high/right
high_to_low_prio_dict_iterator = reversed(dictionaries)
result = next(high_to_low_prio_dict_iterator)
for dictionary in high_to_low_prio_dict_iterator:
result = merge_hash(dictionary, result, recursive, list_merge)
return result
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
@environmentfilter
def extract(environment, item, container, morekeys=None):
if morekeys is None:
keys = [item]
elif isinstance(morekeys, list):
keys = [item] + morekeys
else:
keys = [item, morekeys]
value = container
for key in keys:
value = environment.getitem(value, key)
return value
@environmentfilter
def do_groupby(environment, value, attribute):
"""Overridden groupby filter for jinja2, to address an issue with
jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
has repr that prevents ansible.template.safe_eval.safe_eval from being
able to parse and eval the data.
jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
>=2.9.5 uses a standard tuple repr on the namedtuple.
The adaptation here, is to run the jinja2 `do_groupby` function, and
cast all of the namedtuples to a regular tuple.
See https://github.com/ansible/ansible/issues/20098
We may be able to remove this in the future.
"""
return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string, encoding='utf-8'):
return to_text(base64.b64encode(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
def b64decode(string, encoding='utf-8'):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
def flatten(mylist, levels=None, skip_nulls=True):
ret = []
for element in mylist:
if skip_nulls and element in (None, 'None', 'null'):
# ignore null items
continue
elif is_sequence(element):
if levels is None:
ret.extend(flatten(element, skip_nulls=skip_nulls))
elif levels >= 1:
# decrement as we go down the stack
ret.extend(flatten(element, levels=(int(levels) - 1), skip_nulls=skip_nulls))
else:
ret.append(element)
else:
ret.append(element)
return ret
def subelements(obj, subelements, skip_missing=False):
'''Accepts a dict or list of dicts, and a dotted accessor and produces a product
of the element and the results of the dotted accessor
>>> obj = [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}]
>>> subelements(obj, 'groups')
[({'name': 'alice', 'groups': ['wheel'], 'authorized': ['/tmp/alice/onekey.pub']}, 'wheel')]
'''
if isinstance(obj, dict):
element_list = list(obj.values())
elif isinstance(obj, list):
element_list = obj[:]
else:
raise AnsibleFilterError('obj must be a list of dicts or a nested dict')
if isinstance(subelements, list):
subelement_list = subelements[:]
elif isinstance(subelements, string_types):
subelement_list = subelements.split('.')
else:
raise AnsibleFilterTypeError('subelements must be a list or a string')
results = []
for element in element_list:
values = element
for subelement in subelement_list:
try:
values = values[subelement]
except KeyError:
if skip_missing:
values = []
break
raise AnsibleFilterError("could not find %r key in iterated item %r" % (subelement, values))
except TypeError:
raise AnsibleFilterTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values))
if not isinstance(values, list):
raise AnsibleFilterTypeError("the key %r should point to a list, got %r" % (subelement, values))
for value in values:
results.append((element, value))
return results
def dict_to_list_of_dict_key_value_elements(mydict, key_name='key', value_name='value'):
''' takes a dictionary and transforms it into a list of dictionaries,
with each having a 'key' and 'value' keys that correspond to the keys and values of the original '''
if not isinstance(mydict, Mapping):
raise AnsibleFilterTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
ret = []
for key in mydict:
ret.append({key_name: key, value_name: mydict[key]})
return ret
def list_of_dict_key_value_elements_to_dict(mylist, key_name='key', value_name='value'):
''' takes a list of dicts with each having a 'key' and 'value' keys, and transforms the list into a dictionary,
effectively as the reverse of dict2items '''
if not is_sequence(mylist):
raise AnsibleFilterTypeError("items2dict requires a list, got %s instead." % type(mylist))
return dict((item[key_name], item[value_name]) for item in mylist)
def path_join(paths):
''' takes a sequence or a string, and return a concatenation
of the different members '''
if isinstance(paths, string_types):
return os.path.join(paths)
elif is_sequence(paths):
return os.path.join(*paths)
else:
raise AnsibleFilterTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# jinja2 overrides
'groupby': do_groupby,
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
'from_yaml_all': from_yaml_all,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'expandvars': partial(unicode_wrap, os.path.expandvars),
'path_join': path_join,
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# file glob
'fileglob': fileglob,
# types
'bool': to_bool,
'to_datetime': to_datetime,
# date formatting
'strftime': strftime,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# comment-style decoration
'comment': comment,
# debug
'type_debug': lambda o: o.__class__.__name__,
# Data structures
'combine': combine,
'extract': extract,
'flatten': flatten,
'dict2items': dict_to_list_of_dict_key_value_elements,
'items2dict': list_of_dict_key_value_elements_to_dict,
'subelements': subelements,
'split': partial(unicode_wrap, text_type.split),
}
| gpl-3.0 | 2,740,070,881,585,711,600 | 31.476621 | 120 | 0.611694 | false |
stormi/tsunami | src/secondaires/familier/editeurs/famedit/__init__.py | 1 | 6790 | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'famedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
"""
from primaires.interpreteur.editeur.choix import Choix
from primaires.interpreteur.editeur.flag import Flag
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.selection import Selection
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.scripting.editeurs.edt_script import EdtScript
from secondaires.familier.constantes import *
class EdtFamedit(Presentation):
"""Classe définissant l'éditeur de fiche de familier famedit."""
nom = "famedit"
def __init__(self, personnage, fiche):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, fiche)
if personnage and fiche:
self.construire(fiche)
def __getnewargs__(self):
return (None, None)
def construire(self, fiche):
"""Construction de l'éditeur"""
# Régimes
regime = self.ajouter_choix("régime alimentaire", "r", Choix, fiche,
"regime", REGIMES)
regime.parent = self
regime.prompt = "Régime alimentaire du familier : "
regime.apercu = "{objet.regime}"
regime.aide_courte = \
"Entrez le |ent|régime|ff| du familier ou |cmd|/|ff| pour revenir " \
"à la fenêtre parente.\n\nRégimes disponibles : {}.\n\n" \
"Régime actuel : |bc|{{objet.regime}}|ff|".format(
", ".join(REGIMES))
# Harnachements supportés
harnachements = self.ajouter_choix("harnachement supportés", "h",
Selection, fiche, "harnachements", TYPES_HARNACHEMENT)
harnachements.parent = self
harnachements.prompt = "Harnachements supportés : "
harnachements.apercu = "{objet.str_harnachements}"
harnachements.aide_courte = \
"Entrez un |ent|harnachement supporté|ff| pour l'ajouter " \
"ou le retirer\nou |cmd|/|ff| pour revenir à la fenêtre " \
"parente.\n\nHarnachements possibles : " + \
", ".join(sorted(TYPES_HARNACHEMENT)) + "\nHarnachements " \
"supportés actuellement : {objet.str_harnachements}"
# Stats pouvant progresser
stats = self.ajouter_choix("stats pouvant progresser", "st",
Selection, fiche, "stats_progres", ["force", "agilite",
"robustesse", "intelligence", "charisme", "sensibilite"])
stats.parent = self
stats.prompt = "Stats pouvant augmenter automatiquement : "
stats.apercu = "{objet.str_stats_progres}"
stats.aide_courte = \
"Entrez un |ent|nom de stat|ff| pour l'ajouter " \
"ou le retirer\nou |cmd|/|ff| pour revenir à la fenêtre " \
"parente.\n\nQuand le familier gagne un niveau, il va " \
"choisir aléatoirement parmi ces stats et les\naugmenter " \
"si il a des points d'entraînement disponibles\n\nStats " \
"automatiques actuelles : {objet.str_stats_progres}"
# Monture
monture = self.ajouter_choix("peut être monté", "m", Flag, fiche,
"monture")
monture.parent = self
# Sorties verticales
verticales = self.ajouter_choix(
"peut emprunter les sorties verticales", "v", Flag, fiche,
"sorties_verticales")
verticales.parent = self
# Aller en intérieur
interieur = self.ajouter_choix("peut aller en intérieur", "l",
Flag, fiche, "aller_interieur")
interieur.parent = self
# Difficulté d'apprivoisement
difficulte = self.ajouter_choix("difficulté d'apprivoisement", "d",
Entier, fiche, "difficulte_apprivoisement")
difficulte.parent = self
difficulte.apercu = "{objet.difficulte_apprivoisement}%"
difficulte.prompt = "Entrez la difficulté d'apprivoisement du " \
"familier : "
difficulte.aide_courte = \
"Entrez |ent|la difficulté d'apprivoisement|ff| du familier\n" \
"(entre |ent|1|ff| et |ent|100|ff|) ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nDifficulté actuelle : " \
"{objet.difficulte_apprivoisement}%"
# Prix unitaire
prix = self.ajouter_choix("prix unitaire", "u",
Entier, fiche, "m_valeur")
prix.parent = self
prix.apercu = "{objet.m_valeur} pièces de bronze"
prix.prompt = "Entrez le prix unitaire du familier : "
prix.aide_courte = \
"Entrez |ent|le prix unitaire|ff| du familier" \
"ou |cmd|/|ff| pour revenir à la fenêtre parente.\n\n" \
"Prix unitaire actuel : {objet.m_valeur}"
# Script
scripts = self.ajouter_choix("scripts", "sc", EdtScript,
fiche.script)
scripts.parent = self
| bsd-3-clause | -1,508,693,470,477,261,300 | 43.394737 | 81 | 0.655009 | false |
prechelt/typecheck-decorator | typecheck/test_typing_annotations.py | 1 | 18996 | import builtins
import datetime as dt
import io
import re
import sys
import tempfile
import typing as tg
import pytest
import typecheck as tc
import typecheck.framework as fw
from typecheck.testhelper import expected
# global TypeVars, used in various places:
X = tg.TypeVar('X')
Tb = tg.TypeVar('Tb', bound=dt.date) # bound: only dt.date and its subclasses
Tc = tg.TypeVar('Tc', str, bytes) # constrained: values must be str or bytes
############################################################################
# the typing module as such:
def test_typing_module_weirdness():
# This was a Py3.4 bug in typing 3.5.0.1:
assert issubclass(tg.Iterable, tg.Generic) == (sys.version_info >= (3,5))
# ItemsView comes out with three parameters:
# one (T_co) from Iterable (via MappingView),
# two (KT, VT_co) from Generic.
# T_co must in fact be Tuple[KT, VT_co], but how would we know that?
    # Three parameters make no sense; this is a mess.
assert tg.ItemsView.__parameters__ == (tg.T_co, tg.KT, tg.VT_co)
# This is an assumption in GenericMetaChecker._can_have_instance:
assert not issubclass(tg.Sequence[X], tg.Iterable[X]) # very strange!
############################################################################
# Generic type with fixed content type
@tc.typecheck
def foo_Sequence_int_to_List_int(x: tg.Sequence[int], y) -> tg.List[int]:
    x.append(y) # works because the tests pass lists; a plain Sequence has no append
return x
def test_Sequence_int_with_empty_Sequence():
assert foo_Sequence_int_to_List_int([], 4) == [4]
def test_Sequence_int_OK():
assert foo_Sequence_int_to_List_int([1, 2], 4) == [1, 2, 4]
def test_Sequence_int_with_no_Sequence():
with expected(tc.InputParameterError(
"has got an incompatible value for x: 4")):
        foo_Sequence_int_to_List_int(4) # 4 is not a Sequence[int]
def test_Sequence_int_with_wrong_Sequence():
with expected(tc.InputParameterError(
"has got an incompatible value for x: ['mystring']")):
foo_Sequence_int_to_List_int(["mystring"], 77)
def test_Sequence_int_with_wrong_result():
with expected(tc.ReturnValueError(
"has returned an incompatible value: [1, '2']")):
foo_Sequence_int_to_List_int([1], "2")
############################################################################
# Generic stand-alone functions
def test_TypeVarNamespace_without_instance():
class A:
pass
class B(A):
pass
for thetype in (int, float, str, list, dict, A, MyGeneric):
ns = fw.TypeVarNamespace()
assert not ns.is_generic_in(X)
assert not ns.is_bound(X)
ns.bind(X, thetype)
assert ns.is_bound(X)
assert ns.binding_of(X) == thetype
assert ns.is_compatible(X, thetype)
if thetype == A:
assert ns.is_compatible(X, B)
@tc.typecheck
def foo_Sequence_X_to_Sequence_X(xs: tg.Sequence[X], x: X) -> tg.Sequence[X]:
xs.append(x)
return xs
def test_Sequence_X_int_OK():
assert foo_Sequence_X_to_Sequence_X([1, 2], 4) == [1, 2, 4]
def test_Sequence_X_int_notOK():
with expected(tc.InputParameterError(
"foo_Sequence_X_to_Sequence_X() has got an incompatible value for x: a_string")):
foo_Sequence_X_to_Sequence_X([1, 2], "a_string")
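# Added illustration (not part of the original suite): X is bound afresh on
# every call, so a homogeneous str sequence passes just like the int case above.
def test_Sequence_X_str_OK():
    assert foo_Sequence_X_to_Sequence_X(["a", "b"], "c") == ["a", "b", "c"]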
############################################################################
# TypeVarNamespace, Generic class with TypeVar binding on the instance level
class MyGeneric(tg.Generic[X]):
def __init__(self, initial_element=None):
self.content = [] # a tg.Sequence
if initial_element is not None:
self.content.append(initial_element)
@tc.typecheck
def append(self, el: X):
self.content.append(el)
@tc.typecheck
def get(self, index: int) -> X:
return self.content[index]
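# Note: the TypeVar X is bound per MyGeneric instance on the first type-checked
# call (see test_MyGeneric_OK_and_not_OK below), e.g.:
#   mg = MyGeneric(); mg.append(1)    # binds X to int for this instance
#   mg.append("a")                    # would now violate the binding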
diverse_collection = (None, False, 1, 2.0, "three", [4], {5: "six"},
MyGeneric(), MyGeneric("seven"),
MyGeneric[X](), MyGeneric[str]("seven"),
dt.date.today(), dt.datetime.now(),
) # None must be first
def test_TypeVarNamespace_with_instance():
mycollection = list(diverse_collection)[1:] # leave out None
for element in mycollection:
print(element)
thetype = type(element)
mygen = MyGeneric(element)
ns = fw.TypeVarNamespace(mygen)
assert ns._instance
assert ns.is_generic_in(X)
assert not ns.is_bound(X)
ns.bind(X, thetype)
assert ns.is_bound(X)
assert ns.binding_of(X) == thetype
assert ns.is_compatible(X, thetype)
def test_MyGeneric_OK_and_not_OK():
for element1 in diverse_collection:
for element2 in diverse_collection:
if type(element1) == type(element2):
continue
mygen = MyGeneric(element1)
mygen.append(element1) # binds X
mygen.append(element1) # checks X binding: OK
print(element1, element2)
if (issubclass(type(element1), type(element2)) or
issubclass(type(element2), type(element1))):
mygen.append(element2) # conforms to X binding
else:
with expected(tc.InputParameterError("")):
mygen.append(element2) # violates X binding
# TODO: test Generic class with multiple inheritance
############################################################################
# type variable with bound or constraint
@tc.typecheck
def foo_with_bound(date1: Tb, date2: Tb):
pass
def test_TypeVar_bound_OK_sameclass():
foo_with_bound(dt.date.today(), dt.date.today())
def test_TypeVar_bound_OK_subclass():
foo_with_bound(dt.datetime.now(), dt.datetime.now())
def test_TypeVar_bound_OK_mixed_classes():
foo_with_bound(dt.datetime.now(), dt.date.today())
foo_with_bound(dt.date.today(), dt.datetime.now())
def test_TypeVar_bound_violated():
with (expected(tc.InputParameterError(""))):
foo_with_bound(1, 2) # both same type, but not below the bound
with (expected(tc.InputParameterError(""))):
foo_with_bound(object(), object()) # above the bound
@tc.typecheck
def foo_with_constraint(date1: Tc, date2: Tc):
pass
def test_TypeVar_constraint_OK():
foo_with_constraint("str1", "str2")
foo_with_constraint(b"bytes1", b"bytes2")
def test_TypeVar_constraint_not_OK():
with (expected(tc.InputParameterError(""))):
foo_with_constraint("str1", b"bytes1")
with (expected(tc.InputParameterError(""))):
foo_with_constraint(("b","y"), ("t","e"))
############################################################################
# Generic classes subclass relationship:
def test_GenericMetaChecker_dot_can_have_subclass():
Ch = tc.typing_predicates.GenericMetaChecker # class alias
assert Ch(tg.Sequence[int])._is_possible_subclass(list, tg.Sequence[int])
assert not Ch(tg.Sequence[int])._is_possible_subclass(int, tg.Sequence[int])
assert Ch(tg.Iterable[int])._is_possible_subclass(tg.Sequence[int], tg.Iterable[int])
############################################################################
# Mapping, Set, MappingView
@tc.typecheck
def foo_Mapping_str_float_to_float(m: tg.Mapping[str,float], k: str) -> float:
return m[k]
def test_Mapping_str_float_OK():
assert foo_Mapping_str_float_to_float(dict(a=4.0), "a") == 4.0
def test_Mapping_str_float_not_OK():
with expected(tc.InputParameterError("{'a': True}")): # wrong value type
assert foo_Mapping_str_float_to_float(dict(a=True), "a") == True
with expected(tc.InputParameterError("{b'a': 4.0}")): # wrong key type
assert foo_Mapping_str_float_to_float({b'a':4.0}, b"a") == 4.0
@tc.typecheck
def foo_Set_Tc_Tc_to_bool(s: tg.Set[Tc], el: Tc) -> bool:
return el in s
def test_Set_Tc_OK():
assert foo_Set_Tc_Tc_to_bool(set(("yes","maybe")), "yes")
assert not foo_Set_Tc_Tc_to_bool(set(("yes","maybe")), "no")
assert foo_Set_Tc_Tc_to_bool(set((b"yes",b"maybe")), b"yes")
def test_Set_Tc_not_OK():
with expected(tc.InputParameterError("")):
assert foo_Set_Tc_Tc_to_bool(set(("yes",b"maybe")), "yes")
with expected(tc.InputParameterError("")):
assert foo_Set_Tc_Tc_to_bool(set((1, 2)), 2)
with expected(tc.InputParameterError("")):
assert foo_Set_Tc_Tc_to_bool(set((("yes",),("maybe",))), ("yes",))
@tc.typecheck
def foo_KeysView_to_Sequence(v: tg.KeysView[Tc]) -> tg.Sequence[Tc]:
result = [item for item in v]
result.sort()
assert len([item for item in v]) == len(result) # iterable not exhausted
return result
def test_KeysView_to_Sequence_OK():
assert foo_KeysView_to_Sequence(dict(a=11, b=12).keys()) == ['a', 'b']
assert foo_KeysView_to_Sequence({b'A':11, b'B':12}.keys()) == [b'A', b'B']
def test_KeysView_to_Sequence_not_OK():
with expected(tc.InputParameterError("v: dict_keys\(.*3.*")):
assert foo_KeysView_to_Sequence({b'A':11, b'B':12, 3:13}.keys()) == [b'A', b'B', 13]
############################################################################
# for Iterator and Container we cannot check the actual content
@tc.typecheck
def foo_Iterator(i: tg.Iterator[dt.date]):
pass
@tc.typecheck
def foo_Container(c: tg.Container[tg.Sequence[str]]):
pass
def test_Iterable_Iterator_Container_OK():
"""
No extra code is needed to check Iterable, Iterator, and Container,
because there is no suitable way to access their contents.
"""
foo_Iterator((dt.date.today(), dt.date.today()).__iter__())
foo_Container([["nested", "list"], ["of", "strings"]])
def test_Iterator_Container_content_not_OK_catchable():
"""
Because there is no suitable way to access their contents,
such generic types may still pass the typecheck if their content is
of the wrong type.
This is a fundamental problem, not an implementation gap.
The only cases where improper contents will be caught is when the argument
is _also_ tg.Iterable.
"""
with expected(tc.InputParameterError("list_iterator")):
foo_Iterator(["shouldn't be", "strings here"].__iter__())
with expected(tc.InputParameterError("3, 4")):
foo_Container([[3, 4], [5, 6]])
def test_Iterator_totally_not_OK():
with expected(tc.InputParameterError("")):
foo_Iterator((dt.date.today(), dt.date.today())) # lacks .__next__()
class MySpecialtyGeneric(tg.Container[X]):
def __init__(self, contents):
assert isinstance(contents, tg.Sequence)
self.contents = contents # an 'nice' container, but hidden within
def __iter__(self):
return self.contents.__iter__()
def __contains__(self, item):
return item in self.contents
@tc.typecheck
def foo_MySpecialtyGeneric(c: MySpecialtyGeneric[float]):
pass
def test_Container_content_not_OK_not_catchable():
"""
See above: With generics that are not otherwise checkable,
wrong contents will not be detected.
"""
incorrect_content = MySpecialtyGeneric(["shouldn't be", "strings here"])
foo_Container(incorrect_content) # cannot detect
foo_MySpecialtyGeneric(incorrect_content) # cannot detect
############################################################################
# NamedTuple
Employee = tg.NamedTuple('Employee', [('name', str), ('id', int)])
Employee2 = tg.NamedTuple('Employee2', [('name', str), ('id', int)])
@tc.typecheck
def foo_Employee(e: Employee):
pass
def test_NamedTuple_OK():
foo_Employee(Employee(name="Jones", id=99))
def test_NamedTuple_not_OK():
with expected(tc.InputParameterError("name=8, id=9)")):
foo_Employee(Employee(name=8, id=9))
with expected(tc.InputParameterError("'aaa')")):
foo_Employee(Employee(name='Jones', id='aaa'))
with expected(tc.InputParameterError("Employee2(name='Jones', id=999)")):
foo_Employee(Employee2(name='Jones', id=999))
############################################################################
# Tuple
@tc.typecheck
def foo_Tuple_int_float_to_float(t: tg.Tuple[int, float]) -> float:
return t[1]
def test_Tuple_OK():
assert foo_Tuple_int_float_to_float((2, 3.0)) == 3.0
def test_Tuple_not_OK():
with expected(tc.InputParameterError("t: 2")):
foo_Tuple_int_float_to_float(2)
with expected(tc.InputParameterError("t: (2,)")):
foo_Tuple_int_float_to_float((2,))
with expected(tc.InputParameterError("t: (2, None)")):
foo_Tuple_int_float_to_float((2, None))
with expected(tc.InputParameterError("t: None")):
foo_Tuple_int_float_to_float(None)
with expected(tc.InputParameterError("t: (2, 3)")):
foo_Tuple_int_float_to_float((2, 3))
with expected(tc.InputParameterError("t: (2, 3.0, 4.0)")):
foo_Tuple_int_float_to_float((2, 3.0, 4.0))
############################################################################
# Union
@tc.typecheck
def foo_Union_int_SequenceFloat(u: tg.Union[int, tg.Sequence[float]]):
pass
def test_Union_OK():
foo_Union_int_SequenceFloat(4)
foo_Union_int_SequenceFloat([])
foo_Union_int_SequenceFloat([4.0, 5.0])
def test_Union_not_OK():
with expected(tc.InputParameterError("u: wrong")):
foo_Union_int_SequenceFloat("wrong")
with expected(tc.InputParameterError("u: [4]")):
foo_Union_int_SequenceFloat([4])
with expected(tc.InputParameterError("u: None")):
foo_Union_int_SequenceFloat(None)
############################################################################
# Optional
# needs no implementation code, all work is done by tg itself
@tc.typecheck
def foo_Optional_Union_int_SequenceFloat(u: tg.Optional[tg.Union[int, tg.Sequence[float]]]):
pass
def test_Optional_OK():
foo_Optional_Union_int_SequenceFloat(None)
foo_Optional_Union_int_SequenceFloat(4)
foo_Optional_Union_int_SequenceFloat([])
foo_Optional_Union_int_SequenceFloat([4.0, 5.0])
def test_Optional_not_OK():
with expected(tc.InputParameterError("u: wrong")):
foo_Optional_Union_int_SequenceFloat("wrong")
with expected(tc.InputParameterError("u: [4]")):
foo_Optional_Union_int_SequenceFloat([4])
############################################################################
# Callable
@tc.typecheck
def foo_Callable(func: tg.Callable):
pass
@pytest.mark.skipif(True, reason="I have no idea what's the problem here.")
def test_Callable_OK(): # TODO: What's going wrong here?
assert callable(foo_Callable)
# Not even one of the following works:
foo_Callable(lambda: foo_Callable)
foo_Callable(lambda x: 2*x)
foo_Callable(builtins.callable)
foo_Callable(builtins.dict)
foo_Callable(builtins.len)
foo_Callable(foo_Callable)
############################################################################
# _Protocol
# is handled by TypeChecker without special code, so we do not test them all
@tc.typecheck
def foo_SupportsAbs(x: tg.SupportsAbs) -> tg.SupportsAbs:
return abs(x)
def test_SupportsAbs_OK():
assert foo_SupportsAbs(-4) == 4
assert foo_SupportsAbs(0.0) == 0.0
assert foo_SupportsAbs(True) == 1
def test_SupportsAbs_not_OK():
with expected(tc.InputParameterError("")):
foo_SupportsAbs("-4")
############################################################################
# io
# tg.io appears to be hardly useful as of 3.5
def test_io_is_halfhearted():
"""
It would be pythonic if tg.io.IO applied to all file-like objects.
But as of 3.5, it does not, which is what we assert here.
"""
with io.StringIO("my string as input") as f:
assert not isinstance(f, tg.io.TextIO)
assert not isinstance(f, tg.io.IO[str])
with tempfile.TemporaryFile("wb") as f:
if "file" in dir(f):
f = f.file # TemporaryFile() on non-POSIX platform
assert not isinstance(f, tg.io.BinaryIO)
assert not isinstance(f, tg.io.IO[bytes])
############################################################################
# re
# tg.re appears to be broken as of 3.5
def test_re_is_halfhearted():
"""
As of 3.5, the implementation of tg appears to be incomplete for TypeAlias.
All those asserts should in fact be successful.
"""
error = TypeError("Type aliases cannot be used with isinstance().")
with expected(error):
assert isinstance(re.compile("regexp"), tg.re.Pattern[str])
with expected(error):
assert isinstance(re.compile(b"byteregexp"), tg.re.Pattern[bytes])
with expected(error):
assert isinstance(re.match("regexp", "string"), tg.re.Match[str])
with expected(error):
assert isinstance(re.match(b"regexp", b"string"), tg.re.Match[bytes])
############################################################################
# 'MyClass' as str
class A:
@tc.typecheck
def foo_something(self, another: 'A') -> 'A':
return self
def test_forward_reference_OK():
a1 = A()
a2 = A()
a1.foo_something(a2)
def test_forward_reference_to_local_class_OK_or_not_OK():
class B:
@tc.typecheck
def foo_something(self, another: 'B') -> 'B':
return self
b1 = B()
b2 = B()
b1.foo_something(b2)
with expected(tc.InputParameterError("something different")):
b1.foo_something("something different")
def test_forward_reference_not_OK():
a1 = A()
with expected(tc.InputParameterError("something different")):
a1.foo_something("something different")
############################################################################
# A complex example
ComplexType = tg.Union[tg.Optional[tg.Sequence[tg.Mapping[Tc, tg.Optional[float]]]],
Tc, bool, dt.date]
@tc.typecheck
def foo_wow_thats_nested(x: ComplexType) -> tg.Union[Tc, bool, float]:
if isinstance(x, (str, bytes)):
return x[0:3]
elif isinstance(x, tg.Sequence):
return x[0][sorted(x[0].keys())[0]]
else:
return x
def test_complex_example_OK():
assert foo_wow_thats_nested(True) == True
assert foo_wow_thats_nested('string') == 'str'
assert foo_wow_thats_nested(b'bytes') == b'byt'
assert foo_wow_thats_nested([dict(a=1.0, b=2.0)]) == 1.0
assert foo_wow_thats_nested([{b'a':1.0, b'1':2.0}]) == 2.0
def test_complex_example_not_OK():
with expected(tc.InputParameterError("1")):
assert foo_wow_thats_nested(1) == 1
with expected(IndexError("")):
foo_wow_thats_nested([])
with expected(tc.ReturnValueError("")):
assert foo_wow_thats_nested(None) == None
with expected(tc.ReturnValueError("")):
assert foo_wow_thats_nested(dt.date.today()) == dt.date.today()
with expected(tc.ReturnValueError("None")):
assert foo_wow_thats_nested([dict(a=None, b=2.0)]) == None
############################################################################
# and last of all: Any
@tc.typecheck
def foo_Any(x: tg.Any) -> tg.Any:
return x
def test_Any_OK():
assert foo_Any(42)
############################################################################
| bsd-2-clause | -9,022,903,023,139,706,000 | 32.982111 | 93 | 0.593704 | false |
JukeboxPipeline/jukebox-core | src/jukeboxcore/addons/guerilla/guerillamgmt.py | 1 | 82524 | from PySide import QtGui
from jukeboxcore.log import get_logger
log = get_logger(__name__)
from jukeboxcore import ostool
from jukeboxcore import djadapter
from jukeboxcore.gui.main import JB_MainWindow, JB_Dialog, dt_to_qdatetime
from jukeboxcore.gui import treemodel
from jukeboxcore.gui import djitemdata
from jukeboxcore.plugins import JB_CoreStandaloneGuiPlugin
from jukeboxcore.gui.widgets.guerillamgmt_ui import Ui_guerillamgmt_mwin
from jukeboxcore.gui.widgets.guerilla.projectcreator_ui import Ui_projectcreator_dialog
from jukeboxcore.gui.widgets.guerilla.prjadder_ui import Ui_prjadder_dialog
from jukeboxcore.gui.widgets.guerilla.seqcreator_ui import Ui_seqcreator_dialog
from jukeboxcore.gui.widgets.guerilla.atypecreator_ui import Ui_atypecreator_dialog
from jukeboxcore.gui.widgets.guerilla.atypeadder_ui import Ui_atypeadder_dialog
from jukeboxcore.gui.widgets.guerilla.depcreator_ui import Ui_depcreator_dialog
from jukeboxcore.gui.widgets.guerilla.depadder_ui import Ui_depadder_dialog
from jukeboxcore.gui.widgets.guerilla.usercreator_ui import Ui_usercreator_dialog
from jukeboxcore.gui.widgets.guerilla.useradder_ui import Ui_useradder_dialog
from jukeboxcore.gui.widgets.guerilla.shotcreator_ui import Ui_shotcreator_dialog
from jukeboxcore.gui.widgets.guerilla.assetcreator_ui import Ui_assetcreator_dialog
from jukeboxcore.gui.widgets.guerilla.assetadder_ui import Ui_assetadder_dialog
from jukeboxcore.gui.widgets.guerilla.taskcreator_ui import Ui_taskcreator_dialog
class ProjectCreatorDialog(JB_Dialog, Ui_projectcreator_dialog):
"""A Dialog to create a project
"""
def __init__(self, parent=None, flags=0):
"""Initialize a new project creator dialog
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(ProjectCreatorDialog, self).__init__(parent, flags)
self.project = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_prj)
def create_prj(self, ):
"""Create a project and store it in the self.project
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
short = self.short_le.text()
path = self.path_le.text()
semester = self.semester_le.text()
try:
prj = djadapter.models.Project(name=name, short=short, path=path, semester=semester)
prj.save()
self.project = prj
self.accept()
except:
log.exception("Could not create new project")
class ProjectAdderDialog(JB_Dialog, Ui_prjadder_dialog):
"""A Dialog to add project to a project
"""
def __init__(self, atype=None, department=None, user=None, parent=None, flags=0):
"""Initialize a new project creator dialog
:param atype: the atype to add the project to
:type atype: :class:`djadapter.models.Atype`
:param department: the department to add the project to
:type department: :class:`djadapter.models.Department`
:param parent: the parent object
:param user: the user to tadd the project to
:type user: :class:`djadapter.models.User`
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(ProjectAdderDialog, self).__init__(parent, flags)
self._atype = atype
self._dep = department
self._user = user
self.projects = []
self.setupUi(self)
self.add_pb.clicked.connect(self.add_project)
rootdata = treemodel.ListItemData(["Name", "Description"])
rootitem = treemodel.TreeItem(rootdata)
if atype:
projects = djadapter.projects.exclude(pk__in = atype.projects.all())
elif department:
projects = djadapter.projects.exclude(pk__in = department.projects.all())
else:
projects = djadapter.projects.exclude(users=user)
for project in projects:
projectdata = djitemdata.ProjectItemData(project)
treemodel.TreeItem(projectdata, rootitem)
self.model = treemodel.TreeModel(rootitem)
self.prj_tablev.setModel(self.model)
def add_project(self, ):
"""Add a project and store it in the self.projects
:returns: None
:rtype: None
:raises: None
"""
i = self.prj_tablev.currentIndex()
item = i.internalPointer()
if item:
project = item.internal_data()
if self._atype:
self._atype.projects.add(project)
elif self._dep:
self._dep.projects.add(project)
else:
project.users.add(self._user)
self.projects.append(project)
item.set_parent(None)
class SequenceCreatorDialog(JB_Dialog, Ui_seqcreator_dialog):
"""A Dialog to create a sequence
"""
def __init__(self, project, parent=None, flags=0):
"""Initialize a new sequence creator dialog
:param project: The project for the sequence
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(SequenceCreatorDialog, self).__init__(parent, flags)
self._project = project
self.sequence = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_seq)
def create_seq(self, ):
"""Create a sequence and store it in the self.sequence
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
desc = self.desc_pte.toPlainText()
try:
seq = djadapter.models.Sequence(name=name, project=self._project, description=desc)
seq.save()
self.sequence = seq
self.accept()
except:
log.exception("Could not create new sequence")
class AtypeCreatorDialog(JB_Dialog, Ui_atypecreator_dialog):
"""A Dialog to create a atype
"""
def __init__(self, projects=None, parent=None, flags=0):
"""Initialize a new atype creator dialog
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(AtypeCreatorDialog, self).__init__(parent, flags)
self.projects = projects or []
self.atype = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_atype)
def create_atype(self, ):
"""Create a atype and store it in the self.atype
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
desc = self.desc_pte.toPlainText()
try:
atype = djadapter.models.Atype(name=name, description=desc)
atype.save()
for prj in self.projects:
atype.projects.add(prj)
self.atype = atype
self.accept()
except:
log.exception("Could not create new assettype")
class AtypeAdderDialog(JB_Dialog, Ui_atypeadder_dialog):
"""A Dialog to add atype to a project
"""
def __init__(self, project, parent=None, flags=0):
"""Initialize a new atype creator dialog
:param project: The project for the atypes
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(AtypeAdderDialog, self).__init__(parent, flags)
self._project = project
self.atypes = []
self.setupUi(self)
self.add_pb.clicked.connect(self.add_atype)
rootdata = treemodel.ListItemData(["Name", "Description"])
rootitem = treemodel.TreeItem(rootdata)
atypes = djadapter.atypes.exclude(projects=project)
for atype in atypes:
atypedata = djitemdata.AtypeItemData(atype)
treemodel.TreeItem(atypedata, rootitem)
self.model = treemodel.TreeModel(rootitem)
self.atype_tablev.setModel(self.model)
def add_atype(self, ):
"""Add a atype and store it in the self.atypes
:returns: None
:rtype: None
:raises: None
"""
i = self.atype_tablev.currentIndex()
item = i.internalPointer()
if item:
atype = item.internal_data()
atype.projects.add(self._project)
self.atypes.append(atype)
item.set_parent(None)
class DepCreatorDialog(JB_Dialog, Ui_depcreator_dialog):
"""A Dialog to create a dep
"""
def __init__(self, projects=None, parent=None, flags=0):
"""Initialize a new dep creator dialog
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(DepCreatorDialog, self).__init__(parent, flags)
self.projects = projects or []
self.dep = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_dep)
def create_dep(self, ):
"""Create a dep and store it in the self.dep
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
short = self.short_le.text()
assetflag = self.asset_rb.isChecked()
ordervalue = self.ordervalue_sb.value()
desc = self.desc_pte.toPlainText()
try:
dep = djadapter.models.Department(name=name, short=short, assetflag=assetflag, ordervalue=ordervalue, description=desc)
dep.save()
for prj in self.projects:
dep.projects.add(prj)
self.dep = dep
self.accept()
except:
log.exception("Could not create new department.")
class DepAdderDialog(JB_Dialog, Ui_depadder_dialog):
"""A Dialog to add departments to a project
"""
def __init__(self, project, parent=None, flags=0):
"""Initialize a new dep creator dialog
:param project: The project for the deps
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(DepAdderDialog, self).__init__(parent, flags)
self._project = project
self.deps = []
self.setupUi(self)
self.add_pb.clicked.connect(self.add_dep)
rootdata = treemodel.ListItemData(["Name", "Description", "Ordervalue"])
rootitem = treemodel.TreeItem(rootdata)
deps = djadapter.departments.exclude(projects=project)
for dep in deps:
depdata = djitemdata.DepartmentItemData(dep)
treemodel.TreeItem(depdata, rootitem)
self.model = treemodel.TreeModel(rootitem)
self.dep_tablev.setModel(self.model)
def add_dep(self, ):
"""Add a dep and store it in the self.deps
:returns: None
:rtype: None
:raises: None
"""
i = self.dep_tablev.currentIndex()
item = i.internalPointer()
if item:
dep = item.internal_data()
dep.projects.add(self._project)
self.deps.append(dep)
item.set_parent(None)
class UserCreatorDialog(JB_Dialog, Ui_usercreator_dialog):
"""A Dialog to create a user
"""
def __init__(self, projects=None, tasks=None, parent=None, flags=0):
"""Initialize a new user creator dialog
:param projects: The projects for the user
:type projects: list of :class:`jukeboxcore.djadapter.models.Project`
:param tasks: The tasks for the user
:type tasks: list of :class:`jukeboxcore.djadapter.models.Task`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(UserCreatorDialog, self).__init__(parent, flags)
self.projects = projects or []
self.tasks = tasks or []
self.user = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_user)
def create_user(self, ):
"""Create a user and store it in the self.user
:returns: None
:rtype: None
:raises: None
"""
name = self.username_le.text()
if not name:
self.username_le.setPlaceholderText("Please provide a username.")
return
first = self.first_le.text()
last = self.last_le.text()
email = self.email_le.text()
try:
user = djadapter.models.User(username=name, first_name=first, last_name=last, email=email)
user.save()
for prj in self.projects:
prj.users.add(user)
for task in self.tasks:
task.users.add(user)
self.user = user
self.accept()
except:
log.exception("Could not create new assettype")
class UserAdderDialog(JB_Dialog, Ui_useradder_dialog):
"""A Dialog to add user to a project
"""
def __init__(self, project=None, task=None, parent=None, flags=0):
"""Initialize a new user creator dialog
:param project: The project for the users
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param task: The task for the users
:type task: :class:`jukeboxcore.djadapter.models.Task`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(UserAdderDialog, self).__init__(parent, flags)
self._project = project
self._task = task
self.users = []
self.setupUi(self)
self.add_pb.clicked.connect(self.add_user)
rootdata = treemodel.ListItemData(["Name", "Description"])
rootitem = treemodel.TreeItem(rootdata)
if project:
users = djadapter.users.exclude(project = project)
else:
users = djadapter.users.exclude(task = task)
for user in users:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, rootitem)
self.model = treemodel.TreeModel(rootitem)
self.user_tablev.setModel(self.model)
def add_user(self, ):
"""Add a user and store it in the self.users
:returns: None
:rtype: None
:raises: None
"""
i = self.user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
if self._project:
self._project.users.add(user)
else:
self._task.users.add(user)
self.users.append(user)
item.set_parent(None)
class ShotCreatorDialog(JB_Dialog, Ui_shotcreator_dialog):
"""A Dialog to create a shot
"""
def __init__(self, sequence, parent=None, flags=0):
"""Initialize a new shot creator dialog
:param sequence: the sequence for the shot
        :type sequence: :class:`jukeboxcore.djadapter.models.Sequence`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(ShotCreatorDialog, self).__init__(parent, flags)
self.sequence = sequence
self.shot = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_shot)
def create_shot(self, ):
"""Create a shot and store it in the self.shot
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
if not name:
self.name_le.setPlaceholderText("Please enter a name!")
return
desc = self.desc_pte.toPlainText()
try:
shot = djadapter.models.Shot(sequence=self.sequence, project=self.sequence.project, name=name, description=desc)
shot.save()
self.shot = shot
self.accept()
except:
log.exception("Could not create new shot")
class AssetCreatorDialog(JB_Dialog, Ui_assetcreator_dialog):
"""A Dialog to create a asset
"""
def __init__(self, project, atype=None, parent=None, flags=0):
"""Initialize a new asset creator dialog
:param project: the project of the asset
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(AssetCreatorDialog, self).__init__(parent, flags)
self.project = project
self.atype = atype
self.asset = None
self.setupUi(self)
if not self.atype:
self.atypes = list(project.atype_set.all())
atrootdata = treemodel.ListItemData(["Name"])
atrootitem = treemodel.TreeItem(atrootdata)
for at in self.atypes:
data = djitemdata.AtypeItemData(at)
treemodel.TreeItem(data, atrootitem)
self.atypemodel = treemodel.TreeModel(atrootitem)
self.atype_cb.setModel(self.atypemodel)
else:
self.atype_cb.setVisible(False)
self.atype_lb.setVisible(False)
self.create_pb.clicked.connect(self.create_asset)
def create_asset(self, ):
"""Create a asset and store it in the self.asset
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
if not name:
self.name_le.setPlaceholderText("Please enter a name!")
return
desc = self.desc_pte.toPlainText()
if not self.atype:
atypei = self.atype_cb.currentIndex()
assert atypei >= 0
self.atype = self.atypes[atypei]
try:
asset = djadapter.models.Asset(atype=self.atype, project=self.project, name=name, description=desc)
asset.save()
self.asset = asset
self.accept()
except:
log.exception("Could not create new asset")
class AssetAdderDialog(JB_Dialog, Ui_assetadder_dialog):
"""A Dialog to add asset to a project
"""
def __init__(self, shot=None, asset=None, parent=None, flags=0):
"""Initialize a new asset creator dialog
:param shot: The shot for the assets
:type shot: :class:`jukeboxcore.djadapter.models.Shot`
:param asset: The asset for the assets
:type asset: :class:`jukeboxcore.djadapter.models.Asset`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(AssetAdderDialog, self).__init__(parent, flags)
self._shot = shot
self._asset = asset
self.assets = []
self.setupUi(self)
self.add_pb.clicked.connect(self.add_asset)
rootdata = treemodel.ListItemData(["Name"])
rootitem = treemodel.TreeItem(rootdata)
self.model = treemodel.TreeModel(rootitem)
self.asset_treev.setModel(self.model)
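        # Group the addable assets by their assettype in the tree view;
        # one tree item per atype is created lazily in the loop below.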
atypes = {}
if shot:
assets = djadapter.assets.exclude(pk__in = shot.assets.all()).filter(project=shot.project)
else:
assets = djadapter.assets.exclude(pk__in = asset.assets.all()).filter(project=asset.project)
for asset in assets:
atype = asset.atype
atypeitem = atypes.get(atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(atype)
atypeitem = treemodel.TreeItem(atypedata, rootitem)
atypes[atype] = atypeitem
assetdata = djitemdata.AssetItemData(asset)
treemodel.TreeItem(assetdata, atypeitem)
def add_asset(self, ):
"""Add a asset and store it in the self.assets
:returns: None
:rtype: None
:raises: None
"""
i = self.asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if not isinstance(asset, djadapter.models.Asset):
return
if self._shot:
self._shot.assets.add(asset)
else:
self._asset.assets.add(asset)
self.assets.append(asset)
item.set_parent(None)
class TaskCreatorDialog(JB_Dialog, Ui_taskcreator_dialog):
"""A Dialog to create a task
"""
def __init__(self, element, parent=None, flags=0):
"""Initialize a new task creator dialog
:param element: the element for the task
:type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(TaskCreatorDialog, self).__init__(parent, flags)
self.element = element
self.task = None
self.setupUi(self)
self.create_pb.clicked.connect(self.create_task)
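        # Offer only departments of the element's project that do not already
        # have a task on this element and whose assetflag matches the element
        # kind (asset departments for assets, shot departments for shots).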
qs = djadapter.departments.filter(projects=element.project).exclude(pk__in = element.tasks.all().values_list('department', flat=True))
qs = qs.filter(assetflag=isinstance(element, djadapter.models.Asset))
self.deps = list(qs)
atrootdata = treemodel.ListItemData(["Name"])
atrootitem = treemodel.TreeItem(atrootdata)
for dep in self.deps:
data = djitemdata.DepartmentItemData(dep)
treemodel.TreeItem(data, atrootitem)
self.model = treemodel.TreeModel(atrootitem)
self.dep_cb.setModel(self.model)
def create_task(self, ):
"""Create a task and store it in the self.task
:returns: None
:rtype: None
:raises: None
"""
depi = self.dep_cb.currentIndex()
assert depi >= 0
dep = self.deps[depi]
deadline = self.deadline_de.dateTime().toPython()
try:
task = djadapter.models.Task(department=dep, project=self.element.project, element=self.element, deadline=deadline)
task.save()
self.task = task
self.accept()
except:
log.exception("Could not create new task")
class GuerillaMGMTWin(JB_MainWindow, Ui_guerillamgmt_mwin):
"""A tool for creating entries in the database and a little project management.
"""
def __init__(self, parent=None, flags=0):
"""Initialize a new GuerillaMGMTwin
:param parent: the parent object
:type parent: :class:`QtCore.QObject`
:param flags: the window flags
:type flags: :data:`QtCore.Qt.WindowFlags`
:raises: None
"""
super(GuerillaMGMTWin, self).__init__(parent, flags)
self.cur_prj = None
self.cur_seq = None
self.cur_shot = None
self.cur_atype = None
self.cur_asset = None
self.cur_dep = None
self.cur_task = None
self.cur_user = None
self.setupUi(self)
self.setup_ui()
try:
self.setup_signals()
except:
log.exception("Exception setting up signals")
def setup_ui(self, ):
"""Create all necessary ui elements for the tool
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up the ui")
self.setup_prjs_page()
self.setup_prj_page()
self.setup_seq_page()
self.setup_shot_page()
self.setup_atype_page()
self.setup_asset_page()
self.setup_dep_page()
self.setup_task_page()
self.setup_users_page()
self.setup_user_page()
def setup_prjs_page(self, ):
"""Create and set the model on the projects page
:returns: None
:rtype: None
:raises: None
"""
self.prjs_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
log.debug("Loading projects for projects page.")
rootdata = treemodel.ListItemData(['Name', 'Short', 'Path', 'Created', 'Semester', 'Status', 'Resolution', 'FPS', 'Scale'])
rootitem = treemodel.TreeItem(rootdata)
prjs = djadapter.projects.all()
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
treemodel.TreeItem(prjdata, rootitem)
self.prjs_model = treemodel.TreeModel(rootitem)
self.prjs_tablev.setModel(self.prjs_model)
def setup_prj_page(self, ):
"""Create and set the model on the project page
:returns: None
:rtype: None
:raises: None
"""
self.prj_seq_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.prj_atype_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.prj_dep_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.prj_user_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_seq_page(self, ):
"""Create and set the model on the sequence page
:returns: None
:rtype: None
:raises: None
"""
self.seq_shot_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_shot_page(self, ):
"""Create and set the model on the shot page
:returns: None
:rtype: None
:raises: None
"""
self.shot_asset_treev.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.shot_task_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_atype_page(self, ):
"""Create and set the model on the atype page
:returns: None
:rtype: None
:raises: None
"""
pass
def setup_asset_page(self, ):
"""Create and set the model on the asset page
:returns: None
:rtype: None
:raises: None
"""
self.asset_asset_treev.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.asset_task_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_dep_page(self, ):
"""Create and set the model on the department page
:returns: None
:rtype: None
:raises: None
"""
self.dep_prj_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_task_page(self, ):
"""Create and set the model on the task page
:returns: None
:rtype: None
:raises: None
"""
self.task_user_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_users_page(self, ):
"""Create and set the model on the users page
:returns: None
:rtype: None
:raises: None
"""
self.users_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
log.debug("Loading users for users page.")
rootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])
rootitem = treemodel.TreeItem(rootdata)
users = djadapter.users.all()
for usr in users:
usrdata = djitemdata.UserItemData(usr)
treemodel.TreeItem(usrdata, rootitem)
self.users_model = treemodel.TreeModel(rootitem)
self.users_tablev.setModel(self.users_model)
def setup_user_page(self, ):
"""Create and set the model on the user page
:returns: None
:rtype: None
:raises: None
"""
self.user_prj_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.user_task_treev.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def setup_signals(self, ):
"""Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up signals.")
self.setup_prjs_signals()
self.setup_prj_signals()
self.setup_seq_signals()
self.setup_shot_signals()
self.setup_atype_signals()
self.setup_asset_signals()
self.setup_dep_signals()
self.setup_task_signals()
self.setup_users_signals()
self.setup_user_signals()
log.debug("Signals are set up.")
def setup_prjs_signals(self, ):
"""Setup the signals for the projects page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up projects page signals.")
self.prjs_prj_view_pb.clicked.connect(self.prjs_view_prj)
self.prjs_prj_create_pb.clicked.connect(self.prjs_create_prj)
def setup_prj_signals(self, ):
"""Setup the signals for the project page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up project page signals.")
self.prj_seq_view_pb.clicked.connect(self.prj_view_seq)
self.prj_seq_create_pb.clicked.connect(self.prj_create_seq)
self.prj_atype_view_pb.clicked.connect(self.prj_view_atype)
self.prj_atype_add_pb.clicked.connect(self.prj_add_atype)
self.prj_atype_create_pb.clicked.connect(self.prj_create_atype)
self.prj_dep_view_pb.clicked.connect(self.prj_view_dep)
self.prj_dep_add_pb.clicked.connect(self.prj_add_dep)
self.prj_dep_create_pb.clicked.connect(self.prj_create_dep)
self.prj_user_view_pb.clicked.connect(self.prj_view_user)
self.prj_user_add_pb.clicked.connect(self.prj_add_user)
self.prj_user_remove_pb.clicked.connect(self.prj_remove_user)
self.prj_user_create_pb.clicked.connect(self.prj_create_user)
self.prj_path_view_pb.clicked.connect(self.prj_show_path)
self.prj_desc_pte.textChanged.connect(self.prj_save)
self.prj_semester_le.editingFinished.connect(self.prj_save)
self.prj_fps_dsb.valueChanged.connect(self.prj_save)
self.prj_res_x_sb.valueChanged.connect(self.prj_save)
self.prj_res_y_sb.valueChanged.connect(self.prj_save)
self.prj_scale_cb.currentIndexChanged.connect(self.prj_save)
def setup_seq_signals(self, ):
"""Setup the signals for the sequence page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up sequence page signals.")
self.seq_prj_view_pb.clicked.connect(self.seq_view_prj)
self.seq_shot_view_pb.clicked.connect(self.seq_view_shot)
self.seq_shot_create_pb.clicked.connect(self.seq_create_shot)
self.seq_desc_pte.textChanged.connect(self.seq_save)
def setup_shot_signals(self, ):
"""Setup the signals for the shot page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up shot page signals.")
self.shot_prj_view_pb.clicked.connect(self.shot_view_prj)
self.shot_seq_view_pb.clicked.connect(self.shot_view_seq)
self.shot_asset_view_pb.clicked.connect(self.shot_view_asset)
self.shot_asset_create_pb.clicked.connect(self.shot_create_asset)
self.shot_asset_add_pb.clicked.connect(self.shot_add_asset)
self.shot_asset_remove_pb.clicked.connect(self.shot_remove_asset)
self.shot_task_view_pb.clicked.connect(self.shot_view_task)
self.shot_task_create_pb.clicked.connect(self.shot_create_task)
self.shot_start_sb.valueChanged.connect(self.shot_save)
self.shot_end_sb.valueChanged.connect(self.shot_save)
self.shot_handle_sb.valueChanged.connect(self.shot_save)
self.shot_desc_pte.textChanged.connect(self.shot_save)
def setup_atype_signals(self, ):
"""Setup the signals for the assettype page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up atype page signals.")
self.asset_prj_view_pb.clicked.connect(self.asset_view_prj)
self.asset_atype_view_pb.clicked.connect(self.asset_view_atype)
self.atype_asset_view_pb.clicked.connect(self.atype_view_asset)
self.atype_asset_create_pb.clicked.connect(self.atype_create_asset)
self.atype_desc_pte.textChanged.connect(self.atype_save)
def setup_asset_signals(self, ):
"""Setup the signals for the asset page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up asset signals.")
self.asset_asset_view_pb.clicked.connect(self.asset_view_asset)
self.asset_asset_create_pb.clicked.connect(self.asset_create_asset)
self.asset_asset_add_pb.clicked.connect(self.asset_add_asset)
self.asset_asset_remove_pb.clicked.connect(self.asset_remove_asset)
self.asset_task_view_pb.clicked.connect(self.asset_view_task)
self.asset_task_create_pb.clicked.connect(self.asset_create_task)
self.asset_desc_pte.textChanged.connect(self.asset_save)
def setup_dep_signals(self, ):
"""Setup the signals for the department page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up department page signals.")
self.dep_prj_view_pb.clicked.connect(self.dep_view_prj)
self.dep_prj_add_pb.clicked.connect(self.dep_add_prj)
self.dep_prj_remove_pb.clicked.connect(self.dep_remove_prj)
self.dep_desc_pte.textChanged.connect(self.dep_save)
self.dep_ordervalue_sb.valueChanged.connect(self.dep_save)
def setup_task_signals(self, ):
"""Setup the signals for the task page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up task page signals.")
self.task_user_view_pb.clicked.connect(self.task_view_user)
self.task_user_add_pb.clicked.connect(self.task_add_user)
self.task_user_remove_pb.clicked.connect(self.task_remove_user)
self.task_dep_view_pb.clicked.connect(self.task_view_dep)
self.task_link_view_pb.clicked.connect(self.task_view_link)
self.task_deadline_de.dateChanged.connect(self.task_save)
self.task_status_cb.currentIndexChanged.connect(self.task_save)
def setup_users_signals(self, ):
"""Setup the signals for the users page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up users page signals.")
self.users_user_view_pb.clicked.connect(self.users_view_user)
self.users_user_create_pb.clicked.connect(self.create_user)
def setup_user_signals(self, ):
"""Setup the signals for the user page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up user page signals.")
self.user_task_view_pb.clicked.connect(self.user_view_task)
self.user_prj_view_pb.clicked.connect(self.user_view_prj)
self.user_prj_add_pb.clicked.connect(self.user_add_prj)
self.user_prj_remove_pb.clicked.connect(self.user_remove_prj)
self.user_username_le.editingFinished.connect(self.user_save)
self.user_first_le.editingFinished.connect(self.user_save)
self.user_last_le.editingFinished.connect(self.user_save)
self.user_email_le.editingFinished.connect(self.user_save)
def prjs_view_prj(self, *args, **kwargs):
"""View the, in the projects table view selected, project.
:returns: None
:rtype: None
:raises: None
"""
i = self.prjs_tablev.currentIndex()
item = i.internalPointer()
if item:
prj = item.internal_data()
self.view_prj(prj)
def prjs_create_prj(self, *args, **kwargs):
"""Create a new project
:returns: None
:rtype: None
:raises: None
"""
self.create_prj()
def view_prj(self, prj):
"""View the given project on the project page
:param prj: the project to view
:type prj: :class:`jukeboxcore.djadapter.models.Project`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing project %s', prj.name)
self.cur_prj = None
self.pages_tabw.setCurrentIndex(1)
self.prj_name_le.setText(prj.name)
self.prj_short_le.setText(prj.short)
self.prj_path_le.setText(prj.path)
self.prj_desc_pte.setPlainText(prj.description)
self.prj_created_dte.setDateTime(dt_to_qdatetime(prj.date_created))
self.prj_semester_le.setText(prj.semester)
self.prj_fps_dsb.setValue(prj.framerate)
self.prj_res_x_sb.setValue(prj.resx)
self.prj_res_y_sb.setValue(prj.resy)
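        # Map the project's scale string to the index of the matching entry in
        # the scale combobox; -1 clears the selection for unknown scales.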
scalemap = {"m": 2, "meter": 2, "mm": 0, "millimeter": 0, "cm": 1, "centimeter": 1,
"km": 3, "kilometer": 3, "inch": 4, "foot": 5, "yard": 6, "mile": 7}
scaleindex = scalemap.get(prj.scale, -1)
log.debug("Setting index of project scale combobox to %s. Scale is %s", scaleindex, prj.scale)
self.prj_scale_cb.setCurrentIndex(scaleindex)
seqrootdata = treemodel.ListItemData(['Name', "Description"])
seqrootitem = treemodel.TreeItem(seqrootdata)
for seq in prj.sequence_set.all():
seqdata = djitemdata.SequenceItemData(seq)
treemodel.TreeItem(seqdata, seqrootitem)
self.prj_seq_model = treemodel.TreeModel(seqrootitem)
self.prj_seq_tablev.setModel(self.prj_seq_model)
atyperootdata = treemodel.ListItemData(['Name', "Description"])
atyperootitem = treemodel.TreeItem(atyperootdata)
for atype in prj.atype_set.all():
atypedata = djitemdata.AtypeItemData(atype)
treemodel.TreeItem(atypedata, atyperootitem)
self.prj_atype_model = treemodel.TreeModel(atyperootitem)
self.prj_atype_tablev.setModel(self.prj_atype_model)
deprootdata = treemodel.ListItemData(['Name', "Description", "Ordervalue"])
deprootitem = treemodel.TreeItem(deprootdata)
for dep in prj.department_set.all():
depdata = djitemdata.DepartmentItemData(dep)
treemodel.TreeItem(depdata, deprootitem)
self.prj_dep_model = treemodel.TreeModel(deprootitem)
self.prj_dep_tablev.setModel(self.prj_dep_model)
userrootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])
userrootitem = treemodel.TreeItem(userrootdata)
for user in prj.users.all():
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, userrootitem)
self.prj_user_model = treemodel.TreeModel(userrootitem)
self.prj_user_tablev.setModel(self.prj_user_model)
self.cur_prj = prj
def create_prj(self, atypes=None, deps=None):
"""Create and return a new project
:param atypes: add the given atypes to the project
:type atypes: list | None
        :param deps: add the given departments to the project
:type deps: list | None
:returns: The created project or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Project`
:raises: None
"""
dialog = ProjectCreatorDialog(parent=self)
dialog.exec_()
prj = dialog.project
if prj and atypes:
for at in atypes:
at.projects.add(prj)
at.save()
if prj and deps:
for dep in deps:
dep.projects.add(prj)
dep.save()
if prj:
prjdata = djitemdata.ProjectItemData(prj)
treemodel.TreeItem(prjdata, self.prjs_model.root)
return prj
def prj_view_seq(self, *args, **kwargs):
"""View the, in the prj_seq_tablev selected, sequence.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_seq_tablev.currentIndex()
item = i.internalPointer()
if item:
seq = item.internal_data()
self.view_seq(seq)
def prj_create_seq(self, *args, **kwargs):
"""Create a new Sequence for the current project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
seq = self.create_seq(project=self.cur_prj)
if seq:
seqdata = djitemdata.SequenceItemData(seq)
treemodel.TreeItem(seqdata, self.prj_seq_model.root)
def view_seq(self, seq):
"""View the given sequence on the sequence page
:param seq: the sequence to view
:type seq: :class:`jukeboxcore.djadapter.models.Sequence`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing sequence %s', seq.name)
self.cur_seq = None
self.pages_tabw.setCurrentIndex(2)
self.seq_name_le.setText(seq.name)
self.seq_prj_le.setText(seq.project.name)
self.seq_desc_pte.setPlainText(seq.description)
shotrootdata = treemodel.ListItemData(['Name', "Description", "Duration", "Start", "End"])
shotrootitem = treemodel.TreeItem(shotrootdata)
for shot in seq.shot_set.all():
shotdata = djitemdata.ShotItemData(shot)
treemodel.TreeItem(shotdata, shotrootitem)
self.seq_shot_model = treemodel.TreeModel(shotrootitem)
self.seq_shot_tablev.setModel(self.seq_shot_model)
self.cur_seq = seq
def create_seq(self, project):
"""Create and return a new sequence
:param project: the project for the sequence
        :type project: :class:`jukeboxcore.djadapter.models.Project`
:returns: The created sequence or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Sequence`
:raises: None
"""
dialog = SequenceCreatorDialog(project=project, parent=self)
dialog.exec_()
seq = dialog.sequence
return seq
def prj_view_atype(self, *args, **kwargs):
"""View the, in the atype table view selected, assettype.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_atype_tablev.currentIndex()
item = i.internalPointer()
if item:
atype = item.internal_data()
self.view_atype(atype)
def prj_add_atype(self, *args, **kwargs):
"""Add more assettypes to the project.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
dialog = AtypeAdderDialog(project=self.cur_prj)
dialog.exec_()
atypes = dialog.atypes
for atype in atypes:
atypedata = djitemdata.AtypeItemData(atype)
treemodel.TreeItem(atypedata, self.prj_atype_model.root)
def prj_create_atype(self, *args, **kwargs):
"""Create a new project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
atype = self.create_atype(projects=[self.cur_prj])
if atype:
atypedata = djitemdata.AtypeItemData(atype)
treemodel.TreeItem(atypedata, self.prj_atype_model.root)
def create_atype(self, projects):
"""Create and return a new atype
:param projects: the projects for the atype
        :type projects: list of :class:`jukeboxcore.djadapter.models.Project`
:returns: The created atype or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Atype`
:raises: None
"""
dialog = AtypeCreatorDialog(projects=projects, parent=self)
dialog.exec_()
atype = dialog.atype
return atype
def view_atype(self, atype):
"""View the given atype on the atype page
:param atype: the atype to view
:type atype: :class:`jukeboxcore.djadapter.models.Atype`
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
log.debug('Viewing atype %s', atype.name)
self.cur_atype = None
self.pages_tabw.setCurrentIndex(4)
self.atype_name_le.setText(atype.name)
self.atype_desc_pte.setPlainText(atype.description)
assetrootdata = treemodel.ListItemData(['Name', 'Description'])
assetrootitem = treemodel.TreeItem(assetrootdata)
self.atype_asset_model = treemodel.TreeModel(assetrootitem)
self.atype_asset_treev.setModel(self.atype_asset_model)
for a in djadapter.assets.filter(project=self.cur_prj, atype=atype):
assetdata = djitemdata.AssetItemData(a)
treemodel.TreeItem(assetdata, assetrootitem)
self.cur_atype = atype
def prj_view_dep(self, *args, **kwargs):
"""View the, in the dep table view selected, department.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_dep_tablev.currentIndex()
item = i.internalPointer()
if item:
dep = item.internal_data()
self.view_dep(dep)
def prj_add_dep(self, *args, **kwargs):
"""Add more departments to the project.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
dialog = DepAdderDialog(project=self.cur_prj)
dialog.exec_()
deps = dialog.deps
for dep in deps:
depdata = djitemdata.DepartmentItemData(dep)
treemodel.TreeItem(depdata, self.prj_dep_model.root)
def prj_create_dep(self, *args, **kwargs):
"""Create a new project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
dep = self.create_dep(projects=[self.cur_prj])
if dep:
depdata = djitemdata.DepartmentItemData(dep)
treemodel.TreeItem(depdata, self.prj_dep_model.root)
def create_dep(self, projects):
"""Create and return a new dep
:param projects: the projects for the dep
        :type projects: list of :class:`jukeboxcore.djadapter.models.Project`
:returns: The created dep or None
        :rtype: None | :class:`jukeboxcore.djadapter.models.Department`
:raises: None
"""
dialog = DepCreatorDialog(projects=projects, parent=self)
dialog.exec_()
dep = dialog.dep
return dep
def view_dep(self, dep):
"""View the given department on the department page
:param dep: the dep to view
:type dep: :class:`jukeboxcore.djadapter.models.Department`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing department %s', dep.name)
self.cur_dep = None
self.pages_tabw.setCurrentIndex(6)
self.dep_name_le.setText(dep.name)
self.dep_short_le.setText(dep.short)
self.dep_shot_rb.setChecked(not dep.assetflag)
self.dep_asset_rb.setChecked(dep.assetflag)
self.dep_ordervalue_sb.setValue(dep.ordervalue)
self.dep_desc_pte.setPlainText(dep.description)
rootdata = treemodel.ListItemData(['Name', 'Short', 'Path', 'Created', 'Semester', 'Status', 'Resolution', 'FPS', 'Scale'])
rootitem = treemodel.TreeItem(rootdata)
prjs = dep.projects.all()
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
treemodel.TreeItem(prjdata, rootitem)
self.dep_prj_model = treemodel.TreeModel(rootitem)
self.dep_prj_tablev.setModel(self.dep_prj_model)
self.cur_dep = dep
def prj_view_user(self, *args, **kwargs):
"""View the, in the user table view selected, user.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.view_user(user)
def prj_add_user(self, *args, **kwargs):
"""Add more users to the project.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
dialog = UserAdderDialog(project=self.cur_prj)
dialog.exec_()
users = dialog.users
for user in users:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.prj_user_model.root)
self.cur_prj.save()
def prj_remove_user(self, *args, **kwargs):
"""Remove the, in the user table view selected, user.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
log.debug("Removing user %s.", user.username)
item.set_parent(None)
self.cur_prj.users.remove(user)
def prj_create_user(self, *args, **kwargs):
"""Create a new project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
user = self.create_user(projects=[self.cur_prj])
if user:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.prj_user_model.root)
def create_user(self, projects=None, tasks=None):
"""Create and return a new user
:param projects: the projects for the user
:type projects: list of :class:`jukeboxcore.djadapter.models.Project`
:param tasks: the tasks for the user
:type tasks: list of :class:`jukeboxcore.djadapter.models.Task`
:returns: The created user or None
:rtype: None | :class:`jukeboxcore.djadapter.models.User`
:raises: None
"""
projects = projects or []
tasks = tasks or []
dialog = UserCreatorDialog(projects=projects, tasks=tasks, parent=self)
dialog.exec_()
user = dialog.user
if user:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.users_model.root)
return user
def view_user(self, user):
"""View the given user on the user page
:param user: the user to view
:type user: :class:`jukeboxcore.djadapter.models.User`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing user %s', user.username)
self.cur_user = None
self.pages_tabw.setCurrentIndex(9)
self.user_username_le.setText(user.username)
self.user_first_le.setText(user.first_name)
self.user_last_le.setText(user.last_name)
self.user_email_le.setText(user.email)
prjrootdata = treemodel.ListItemData(['Name', 'Short', 'Path', 'Created', 'Semester', 'Status', 'Resolution', 'FPS', 'Scale'])
prjrootitem = treemodel.TreeItem(prjrootdata)
prjs = djadapter.projects.filter(users=user)
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
treemodel.TreeItem(prjdata, prjrootitem)
self.user_prj_model = treemodel.TreeModel(prjrootitem)
self.user_prj_tablev.setModel(self.user_prj_model)
taskrootdata = treemodel.ListItemData(['Name'])
taskrootitem = treemodel.TreeItem(taskrootdata)
self.user_task_model = treemodel.TreeModel(taskrootitem)
self.user_task_treev.setModel(self.user_task_model)
tasks = djadapter.tasks.filter(users=user)
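        # Build the task tree: project -> "Asset"/"Shot" category -> assettype
        # or sequence -> asset or shot -> task. The dicts below cache already
        # created tree items so each group item is only created once.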
assets = {}
shots = {}
atypes = {}
seqs = {}
prjs = {}
for t in tasks:
tdata = djitemdata.TaskItemData(t)
titem = treemodel.TreeItem(tdata)
e = t.element
if isinstance(e, djadapter.models.Asset):
eitem = assets.get(e)
if not eitem:
edata = djitemdata.AssetItemData(e)
eitem = treemodel.TreeItem(edata)
assets[e] = eitem
egrp = e.atype
egrpitem = atypes.get(egrp)
if not egrpitem:
egrpdata = djitemdata.AtypeItemData(egrp)
egrpitem = treemodel.TreeItem(egrpdata)
atypes[egrp] = egrpitem
else:
eitem = shots.get(e)
if not eitem:
edata = djitemdata.ShotItemData(e)
eitem = treemodel.TreeItem(edata)
shots[e] = eitem
egrp = e.sequence
egrpitem = seqs.get(egrp)
if not egrpitem:
egrpdata = djitemdata.SequenceItemData(egrp)
egrpitem = treemodel.TreeItem(egrpdata)
seqs[egrp] = egrpitem
if eitem not in egrpitem.childItems:
eitem.set_parent(egrpitem)
prj = egrp.project
prjitem = prjs.get(prj)
if not prjitem:
prjdata = djitemdata.ProjectItemData(prj)
prjitem = treemodel.TreeItem(prjdata, taskrootitem)
prjs[prj] = prjitem
assetdata = treemodel.ListItemData(["Asset"])
assetitem = treemodel.TreeItem(assetdata, prjitem)
shotdata = treemodel.ListItemData(["Shot"])
shotitem = treemodel.TreeItem(shotdata, prjitem)
else:
assetitem = prjitem.child(0)
shotitem = prjitem.child(1)
if isinstance(egrp, djadapter.models.Atype) and egrpitem not in assetitem.childItems:
egrpitem.set_parent(assetitem)
elif isinstance(egrp, djadapter.models.Sequence) and egrpitem not in shotitem.childItems:
egrpitem.set_parent(shotitem)
titem.set_parent(eitem)
self.cur_user = user
def prj_show_path(self, ):
"""Show the dir in the a filebrowser of the project
:returns: None
:rtype: None
:raises: None
"""
f = self.prj_path_le.text()
osinter = ostool.get_interface()
osinter.open_path(f)
def prj_save(self):
"""Save the current project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
desc = self.prj_desc_pte.toPlainText()
semester = self.prj_semester_le.text()
fps = self.prj_fps_dsb.value()
resx = self.prj_res_x_sb.value()
resy = self.prj_res_y_sb.value()
scale = self.prj_scale_cb.currentText()
self.cur_prj.description = desc
self.cur_prj.semester = semester
self.cur_prj.framerate = fps
self.cur_prj.resx = resx
self.cur_prj.resy = resy
self.cur_prj.scale = scale
self.cur_prj.save()
def seq_save(self):
"""Save the current sequence
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_seq:
return
desc = self.seq_desc_pte.toPlainText()
self.cur_seq.description = desc
self.cur_seq.save()
def seq_view_prj(self, ):
"""View the project or the current sequence
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_seq:
return
self.view_prj(self.cur_seq.project)
def seq_view_shot(self, ):
"""View the shot that is selected in the table view of the sequence page
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_seq:
return
i = self.seq_shot_tablev.currentIndex()
item = i.internalPointer()
if item:
shot = item.internal_data()
self.view_shot(shot)
def seq_create_shot(self, *args, **kwargs):
"""Create a new shot
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_seq:
return
shot = self.create_shot(sequence=self.cur_seq)
if shot:
shotdata = djitemdata.ShotItemData(shot)
treemodel.TreeItem(shotdata, self.seq_shot_model.root)
def view_shot(self, shot):
"""View the given shot
:param shot: the shot to view
:type shot: :class:`jukeboxcore.djadapter.models.Shot`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing shot %s', shot.name)
self.cur_shot = None
self.pages_tabw.setCurrentIndex(3)
self.shot_name_le.setText(shot.name)
self.shot_prj_le.setText(shot.project.name)
self.shot_seq_le.setText(shot.sequence.name)
self.shot_start_sb.setValue(shot.startframe)
self.shot_end_sb.setValue(shot.endframe)
self.shot_handle_sb.setValue(shot.handlesize)
self.shot_desc_pte.setPlainText(shot.description)
assetsrootdata = treemodel.ListItemData(["Name", "Description"])
assetsrootitem = treemodel.TreeItem(assetsrootdata)
self.shot_asset_model = treemodel.TreeModel(assetsrootitem)
self.shot_asset_treev.setModel(self.shot_asset_model)
atypes = {}
assets = shot.assets.all()
for a in assets:
atype = a.atype
atypeitem = atypes.get(atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(atype)
atypeitem = treemodel.TreeItem(atypedata, assetsrootitem)
atypes[atype] = atypeitem
assetdata = djitemdata.AssetItemData(a)
treemodel.TreeItem(assetdata, atypeitem)
tasksrootdata = treemodel.ListItemData(["Name", "Short"])
tasksrootitem = treemodel.TreeItem(tasksrootdata)
self.shot_task_model = treemodel.TreeModel(tasksrootitem)
self.shot_task_tablev.setModel(self.shot_task_model)
tasks = shot.tasks.all()
for t in tasks:
tdata = djitemdata.TaskItemData(t)
treemodel.TreeItem(tdata, tasksrootitem)
self.cur_shot = shot
def create_shot(self, sequence):
"""Create and return a new shot
:param sequence: the sequence for the shot
:type sequence: :class:`jukeboxcore.djadapter.models.Sequence`
:returns: The created shot or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Shot`
:raises: None
"""
dialog = ShotCreatorDialog(sequence=sequence, parent=self)
dialog.exec_()
shot = dialog.shot
return shot
def shot_view_prj(self, ):
"""View the project of the current shot
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
self.view_prj(self.cur_shot.project)
def shot_view_seq(self, ):
"""View the sequence of the current shot
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
self.view_seq(self.cur_shot.sequence)
def shot_view_task(self, ):
"""View the task that is currently selected on the shot page
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
i = self.shot_task_tablev.currentIndex()
item = i.internalPointer()
if item:
task = item.internal_data()
self.view_task(task)
def shot_view_asset(self, ):
"""View the task that is currently selected on the shot page
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
i = self.shot_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if isinstance(asset, djadapter.models.Asset):
self.view_asset(asset)
def shot_create_task(self, *args, **kwargs):
"""Create a new task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
task = self.create_task(element=self.cur_shot)
if task:
taskdata = djitemdata.TaskItemData(task)
treemodel.TreeItem(taskdata, self.shot_task_model.root)
def create_task(self, element):
"""Create a new task for the given element
:param element: the element for the task
:type element: :class:`jukeboxcore.djadapter.models.Shot` | :class:`jukeboxcore.djadapter.models.Asset`
:returns: None
:rtype: None
:raises: None
"""
dialog = TaskCreatorDialog(element=element, parent=self)
dialog.exec_()
task = dialog.task
return task
def view_asset(self, asset):
"""View the given asset
:param asset: the asset to view
:type asset: :class:`jukeboxcore.djadapter.models.Asset`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing asset %s', asset.name)
self.cur_asset = None
self.pages_tabw.setCurrentIndex(5)
name = asset.name
prj = asset.project.name
atype = asset.atype.name
desc = asset.description
self.asset_name_le.setText(name)
self.asset_prj_le.setText(prj)
self.asset_atype_le.setText(atype)
self.asset_desc_pte.setPlainText(desc)
assetsrootdata = treemodel.ListItemData(["Name", "Description"])
assetsrootitem = treemodel.TreeItem(assetsrootdata)
self.asset_asset_model = treemodel.TreeModel(assetsrootitem)
self.asset_asset_treev.setModel(self.asset_asset_model)
atypes = {}
assets = asset.assets.all()
for a in assets:
atype = a.atype
atypeitem = atypes.get(atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(atype)
atypeitem = treemodel.TreeItem(atypedata, assetsrootitem)
atypes[atype] = atypeitem
assetdata = djitemdata.AssetItemData(a)
treemodel.TreeItem(assetdata, atypeitem)
tasksrootdata = treemodel.ListItemData(["Name", "Short"])
tasksrootitem = treemodel.TreeItem(tasksrootdata)
self.asset_task_model = treemodel.TreeModel(tasksrootitem)
self.asset_task_tablev.setModel(self.asset_task_model)
tasks = asset.tasks.all()
for t in tasks:
tdata = djitemdata.TaskItemData(t)
treemodel.TreeItem(tdata, tasksrootitem)
self.cur_asset = asset
def shot_add_asset(self, *args, **kwargs):
"""Add more assets to the shot.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
dialog = AssetAdderDialog(shot=self.cur_shot)
dialog.exec_()
assets = dialog.assets
atypes = {}
for c in self.shot_asset_model.root.childItems:
atypes[c.internal_data()] = c
for asset in assets:
atypeitem = atypes.get(asset.atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(asset.atype)
atypeitem = treemodel.TreeItem(atypedata, self.shot_asset_model.root)
atypes[asset.atype] = atypeitem
assetdata = djitemdata.AssetItemData(asset)
treemodel.TreeItem(assetdata, atypeitem)
self.cur_shot.save()
def shot_remove_asset(self, *args, **kwargs):
"""Remove the, in the asset table view selected, asset.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
i = self.shot_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if not isinstance(asset, djadapter.models.Asset):
return
log.debug("Removing asset %s.", asset.name)
item.set_parent(None)
self.cur_shot.assets.remove(asset)
def shot_create_asset(self, *args, **kwargs):
"""Create a new shot
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
asset = self.create_asset(project=self.cur_shot.project, shot=self.cur_shot)
if not asset:
return
atypes = {}
for c in self.shot_asset_model.root.childItems:
atypes[c.internal_data()] = c
atypeitem = atypes.get(asset.atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(asset.atype)
atypeitem = treemodel.TreeItem(atypedata, self.shot_asset_model.root)
atypes[asset.atype] = atypeitem
assetdata = djitemdata.AssetItemData(asset)
treemodel.TreeItem(assetdata, atypeitem)
def create_asset(self, project, atype=None, shot=None, asset=None):
"""Create and return a new asset
:param project: the project for the asset
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param atype: the assettype of the asset
:type atype: :class:`jukeboxcore.djadapter.models.Atype`
:param shot: the shot to add the asset to
:type shot: :class:`jukeboxcore.djadapter.models.Shot`
:param asset: the asset to add the new asset to
:type asset: :class:`jukeboxcore.djadapter.models.Asset`
:returns: The created asset or None
:rtype: None | :class:`jukeboxcore.djadapter.models.Asset`
:raises: None
"""
element = shot or asset
dialog = AssetCreatorDialog(project=project, atype=atype, parent=self)
dialog.exec_()
asset = dialog.asset
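        # if no assettype was given, the asset was requested from a shot or a parent asset, so link it to that element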
if not atype:
element.assets.add(asset)
return asset
def view_task(self, task):
"""View the given task
:param task: the task to view
:type task: :class:`jukeboxcore.djadapter.models.Task`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing task %s', task.name)
self.cur_task = None
self.pages_tabw.setCurrentIndex(7)
self.task_dep_le.setText(task.name)
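        # translate the stored status string into the combobox index; unknown values select nothing (-1)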
statusmap = {"New": 0, "Open": 1, "Done":2}
self.task_status_cb.setCurrentIndex(statusmap.get(task.status, -1))
dt = dt_to_qdatetime(task.deadline) if task.deadline else None
self.task_deadline_de.setDateTime(dt)
self.task_link_le.setText(task.element.name)
userrootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])
userrootitem = treemodel.TreeItem(userrootdata)
for user in task.users.all():
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, userrootitem)
self.task_user_model = treemodel.TreeModel(userrootitem)
self.task_user_tablev.setModel(self.task_user_model)
self.cur_task = task
def shot_save(self, ):
"""Save the current shot
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
desc = self.shot_desc_pte.toPlainText()
start = self.shot_start_sb.value()
end = self.shot_end_sb.value()
handle = self.shot_handle_sb.value()
self.cur_shot.description = desc
self.cur_shot.startframe = start
self.cur_shot.endframe = end
self.cur_shot.handlesize = handle
self.cur_shot.save()
def asset_view_prj(self, ):
"""View the project of the current asset
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
prj = self.cur_asset.project
self.view_prj(prj)
def asset_view_atype(self, ):
"""View the project of the current atype
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
atype = self.cur_asset.atype
self.view_atype(atype)
def atype_view_asset(self, ):
"""View the project of the current assettype
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_atype:
return
i = self.atype_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if isinstance(asset, djadapter.models.Asset):
self.view_asset(asset)
def atype_create_asset(self, ):
"""Create a new asset
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_atype:
return
asset = self.create_asset(project=self.cur_prj, atype=self.cur_atype)
if not asset:
return
assetdata = djitemdata.AssetItemData(asset)
treemodel.TreeItem(assetdata, self.atype_asset_model.root)
def atype_save(self):
"""Save the current atype
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_atype:
return
desc = self.atype_desc_pte.toPlainText()
self.cur_atype.description = desc
self.cur_atype.save()
def asset_view_asset(self, ):
"""View the task that is currently selected on the asset page
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
i = self.asset_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if isinstance(asset, djadapter.models.Asset):
self.view_asset(asset)
def asset_add_asset(self, *args, **kwargs):
"""Add more assets to the asset.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
dialog = AssetAdderDialog(asset=self.cur_asset)
dialog.exec_()
assets = dialog.assets
atypes = {}
for c in self.asset_asset_model.root.childItems:
atypes[c.internal_data()] = c
for asset in assets:
atypeitem = atypes.get(asset.atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(asset.atype)
atypeitem = treemodel.TreeItem(atypedata, self.asset_asset_model.root)
atypes[asset.atype] = atypeitem
assetdata = djitemdata.AssetItemData(asset)
treemodel.TreeItem(assetdata, atypeitem)
self.cur_asset.save()
def asset_remove_asset(self, *args, **kwargs):
"""Remove the, in the asset table view selected, asset.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
i = self.asset_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if not isinstance(asset, djadapter.models.Asset):
return
log.debug("Removing asset %s.", asset.name)
item.set_parent(None)
self.cur_asset.assets.remove(asset)
def asset_create_asset(self, *args, **kwargs):
"""Create a new asset
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
asset = self.create_asset(project=self.cur_asset.project, asset=self.cur_asset)
if not asset:
return
atypes = {}
for c in self.asset_asset_model.root.childItems:
atypes[c.internal_data()] = c
atypeitem = atypes.get(asset.atype)
if not atypeitem:
atypedata = djitemdata.AtypeItemData(asset.atype)
atypeitem = treemodel.TreeItem(atypedata, self.asset_asset_model.root)
atypes[asset.atype] = atypeitem
assetdata = djitemdata.AssetItemData(asset)
treemodel.TreeItem(assetdata, atypeitem)
def asset_view_task(self, ):
"""View the task that is currently selected on the asset page
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
i = self.asset_task_tablev.currentIndex()
item = i.internalPointer()
if item:
task = item.internal_data()
self.view_task(task)
def asset_create_task(self, *args, **kwargs):
"""Create a new task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
task = self.create_task(element=self.cur_asset)
if task:
taskdata = djitemdata.TaskItemData(task)
treemodel.TreeItem(taskdata, self.asset_task_model.root)
def asset_save(self):
"""Save the current asset
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
desc = self.asset_desc_pte.toPlainText()
self.cur_asset.description = desc
self.cur_asset.save()
def dep_view_prj(self, ):
"""View the project that is currently selected
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_dep:
return
i = self.dep_prj_tablev.currentIndex()
item = i.internalPointer()
if item:
prj = item.internal_data()
self.view_prj(prj)
def dep_add_prj(self, *args, **kwargs):
"""Add projects to the current department
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_dep:
return
dialog = ProjectAdderDialog(department=self.cur_dep)
dialog.exec_()
prjs = dialog.projects
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
treemodel.TreeItem(prjdata, self.dep_prj_model.root)
def dep_remove_prj(self, *args, **kwargs):
"""Remove the selected project from the department
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_dep:
return
i = self.dep_prj_tablev.currentIndex()
item = i.internalPointer()
if item:
prj = item.internal_data()
self.cur_dep.projects.remove(prj)
item.set_parent(None)
def dep_save(self, ):
"""Save the current department
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_dep:
return
ordervalue = self.dep_ordervalue_sb.value()
desc = self.dep_desc_pte.toPlainText()
self.cur_dep.ordervalue = ordervalue
self.cur_dep.description = desc
self.cur_dep.save()
def task_view_user(self, ):
"""View the user that is currently selected
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
i = self.task_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.view_user(user)
def task_add_user(self, *args, **kwargs):
"""Add users to the current task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
dialog = UserAdderDialog(task=self.cur_task)
dialog.exec_()
users = dialog.users
for user in users:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.task_user_model.root)
def task_remove_user(self, *args, **kwargs):
"""Remove the selected user from the task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
i = self.task_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.cur_task.users.remove(user)
item.set_parent(None)
def task_view_dep(self, ):
"""View the departmetn of the current task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
self.view_dep(self.cur_task.department)
def task_view_link(self, ):
"""View the link of the current task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
e = self.cur_task.element
if isinstance(e, djadapter.models.Asset):
self.view_asset(e)
else:
self.view_shot(e)
def task_save(self, ):
"""Save the current task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
deadline = self.task_deadline_de.dateTime().toPython()
status = self.task_status_cb.currentText()
self.cur_task.deadline = deadline
self.cur_task.status = status
self.cur_task.save()
def users_view_user(self, ):
"""View the user that is currently selected
:returns: None
:rtype: None
:raises: None
"""
i = self.users_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.view_user(user)
def user_view_task(self, ):
"""View the task that is selected
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_user:
return
i = self.user_task_treev.currentIndex()
item = i.internalPointer()
if item:
task = item.internal_data()
if isinstance(task, djadapter.models.Task):
self.view_task(task)
def user_view_prj(self, ):
"""View the project that is currently selected
:returns: None
:rtype: None
:raises: None
"""
i = self.user_prj_tablev.currentIndex()
item = i.internalPointer()
if item:
prj = item.internal_data()
self.view_prj(prj)
def user_add_prj(self, *args, **kwargs):
"""Add projects to the current user
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_user:
return
dialog = ProjectAdderDialog(user=self.cur_user)
dialog.exec_()
prjs = dialog.projects
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
treemodel.TreeItem(prjdata, self.user_prj_model.root)
def user_remove_prj(self, *args, **kwargs):
"""Remove the selected project from the user
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_user:
return
i = self.user_prj_tablev.currentIndex()
item = i.internalPointer()
if item:
prj = item.internal_data()
prj.users.remove(self.cur_user)
item.set_parent(None)
def user_save(self):
"""Save the current user
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_user:
return
username = self.user_username_le.text()
first = self.user_first_le.text()
last = self.user_last_le.text()
email = self.user_email_le.text()
self.cur_user.username = username
self.cur_user.first_name = first
self.cur_user.last_name = last
self.cur_user.email = email
self.cur_user.save()
class GuerillaMGMT(JB_CoreStandaloneGuiPlugin):
"""A plugin that can run a GuerillaMGMT tool
This can be used as a standalone plugin.
    Before you call run, make sure that there is a
    QApplication running. See :mod:`jukeboxcore.gui.main` for helpful functions.
"""
author = "David Zuber"
copyright = "2015"
version = "0.1"
description = "A guerilla tool for projectmanagement and creating entries in the database."
def init(self, ):
"""Do nothing on init! Call run() if you want to start the configeditor
:returns: None
:rtype: None
:raises: None
"""
pass
def uninit(self, ):
"""Do nothing on uninit!
:returns: None
:rtype: None
:raises: None
"""
pass
def run(self, parent=None):
"""Start the configeditor
:returns: None
:rtype: None
:raises: None
"""
self.gw = GuerillaMGMTWin(parent=parent)
self.gw.show()
| bsd-3-clause | -4,366,240,837,862,051,000 | 32.697019 | 142 | 0.592131 | false |
tbullmann/heuhaufen | publication/generators_and_depth/aggregate.py | 1 | 5320 | import os
import pandas
import numpy as np
from bokeh.palettes import Viridis4 as palette
from bokeh.layouts import layout, column, row
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool, Div, DataTable, TableColumn, NumberFormatter, LinearAxis, Select, CustomJS, Slider, Button
import json # must be imported after bokeh
def main(test_path='temp/publication/how_deep/test'):
labels = ['membranes', 'synapses', 'mitochondria']
# concatenate the evaluation and parameters for all runs
dfs = []
for label in labels:
for run in range(1,21):
df = read_run_from_json_and_csv(test_path, run, label)
dfs.append(df)
data = pandas.concat(dfs)
# save aggregated data (in long format)
data.to_csv(os.path.join(test_path, 'summary_long.csv'))
# convert long to wide: label x metric --> label_metric
metrics = data.columns.to_series().groupby(data.dtypes).groups[np.dtype('float64')]
data2 = data.pivot_table(index=['generator', 'layers', 'sample'], columns='label', values=metrics)
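    # flatten the resulting (metric, label) MultiIndex columns into single names of the form '<label>_<metric>'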
data2.columns = ['{}_{}'.format(x, y) for x, y in
zip(data2.columns.get_level_values(1), data2.columns.get_level_values(0))]
data2 = data2.reset_index()
# save aggregated data (in wide format)
data2.to_csv(os.path.join(test_path, 'summary_wide.csv'))
# TODO: interactive plot with bokeh
# bokeh_plot(data2, test_path) # not fully functional, e.g. cannot change label and metric
def read_run_from_json_and_csv(test_path, run, label):
# path to the test result for a particular model
base_path = os.path.join(test_path, '%d' % run)
# getting parameters from the options json file
with open(os.path.join(base_path, "options.json")) as f:
options = dict(json.loads(f.read()).items())
generator = options['generator']
# calculate the number of layers depending on generator network and its specific parameters
if generator == 'unet':
layers = options['u_depth'] * 2 # 1 for down sampling and 1 for up sampling at each level
elif generator == 'densenet':
layers = options['n_dense_blocks'] * options['n_dense_layers'] + 6 # 3 for each encoder and decoder
elif generator == 'resnet':
layers = options['n_res_blocks'] * 2 + 6 # 2 for transformation, 3 for each encoder and decoder
elif generator == 'highwaynet':
layers = options['n_highway_units'] * 2 + 6 # 2 for transformation, 3 for each encoder and decoder
# read evaluation results
df = pandas.read_csv(os.path.join(base_path, 'evaluation/%s.csv' % label)) # no index_col
# add parameters
df['generator'] = generator
df['layers'] = layers
df['label'] = label
df['run'] = run
return df
def bokeh_plot(data, test_path):
networks = ['unet', 'resnet', 'highwaynet', 'densenet']
# assuming all float values are metrics
metrics = data.columns.to_series().groupby(data.dtypes).groups[np.dtype('float64')]
    # calculate the mean of each metric for every (generator, layers) combination
data_mean = data.groupby(['generator', 'layers'])[metrics].mean().reset_index()
source = dict()
source_mean = dict()
for network in networks:
source[network] = ColumnDataSource(data[data.generator == network])
source_mean[network] = ColumnDataSource(data_mean[data_mean.generator == network])
output_file(os.path.join(test_path, "select.html"))
description = Div(text="""
<h1>Evaluation of network type and depth for generator</h1>
<p>
Interact with the widgets to select metric and evaluated label.
</p>
""", width=1000)
fig = figure(plot_width=1000, plot_height=1000, tools=['box_select', 'reset'])
fig.xaxis.axis_label = "layers"
fig.yaxis.axis_label = "value of metric"
plots = []
for network, column_color in zip(networks, palette):
plot = fig.line('layers', metrics[0], legend=dict(value=network), color=column_color,
source=source_mean[network])
plot = fig.scatter('layers', metrics[0], legend=dict(value=network), color=column_color, source=source[network])
# legend which can hide/select a specific metric
fig.legend.location = "bottom_right"
fig.legend.click_policy = "hide"
choices = metrics
axis = 'y'
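    # JavaScript template for a CustomJS callback: switch the plotted column of the glyph and update the axis label when the selection changes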
axis_callback_code = """
plot.glyph.{axis}.field = cb_obj.value
axis.attributes.axis_label = cb_obj.value;
axis.trigger('change');
source.change.emit();
"""
if axis == 'x':
fig.xaxis.visible = None
position = 'below'
initial_choice = 0
else:
fig.yaxis.visible = None
position = 'left'
initial_choice = 1
linear_axis = LinearAxis(axis_label=choices[initial_choice])
fig.add_layout(linear_axis, position)
callback1 = CustomJS(args=dict(source=source[network], axis=linear_axis, plot=plot),
code=axis_callback_code.format(axis=axis))
ticker = Select(value=choices[initial_choice], options=choices, title=axis + '-axis')
ticker.js_on_change('value', callback1)
l = layout([
[description],
[ticker],
[fig]
], sizing_mode='fixed')
show(l)
if __name__ == "__main__":
main()
else:
main()
| mit | 2,667,029,470,658,940,000 | 37 | 126 | 0.644361 | false |
Statoil/libres | python/res/enkf/ert_template.py | 1 | 2187 | # Copyright (C) 2012 Equinor ASA, Norway.
#
# The file 'ert_template.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from res import ResPrototype
class ErtTemplate(BaseCClass):
TYPE_NAME = "ert_template"
_free = ResPrototype("void ert_template_free( ert_template )")
_get_template_file = ResPrototype("char* ert_template_get_template_file(ert_template)")
_get_target_file = ResPrototype("char* ert_template_get_target_file(ert_template)")
_get_arg_list = ResPrototype("subst_list_ref ert_template_get_arg_list( ert_template )")
def __init__(self):
raise NotImplementedError("Class can not be instantiated directly!")
def get_template_file(self):
""" @rtype: str """
return self._get_template_file()
def get_target_file(self):
""" @rtype: str """
return self._get_target_file()
def get_args_as_string(self):
""" @rtype: str """
args_list = self._get_arg_list()
return ", ".join(["{}={}".format(key, args_list.get(key)) for key in args_list.keys()])
def __eq__(self, other):
return self.get_template_file() == other.get_template_file() and\
self.get_target_file() == other.get_target_file() and\
self.get_args_as_string() == other.get_args_as_string()
def __ne__(self, other):
return not self == other
def __repr__(self):
return "ErtTemplate({}, {}, {})".format(
self.get_template_file(),
self.get_target_file(),
self.get_args_as_string())
def free(self):
self._free()
| gpl-3.0 | 87,037,044,405,299,280 | 36.067797 | 98 | 0.631459 | false |
blueskyjunkie/timeTools | timetools/synchronization/compliance/ituTG8261/eecOption1/networkWander.py | 1 | 1252 | #
# Copyright 2017 Russell Smiley
#
# This file is part of timetools.
#
# timetools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# timetools is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with timetools. If not, see <http://www.gnu.org/licenses/>.
#
import timetools.synchronization.compliance.analysis as tsca
# Rec. ITU-T G.8261/Y.1361 (08/2013), Section 9.2.1.1, Table 4, pp 20
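# Presumably each (interval, coefficients) pair defines one piecewise segment of the mask (observation interval in s, limit in ns); see tsca.Mask for the exact semantics.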
mtieNs = tsca.Mask([ ([0.1, 2.5], [250]),
([2.5, 20], [0, 100]),
([20, 2000], [2000]),
([2000], ([0.01, 433], [1, 0.2])) ])
# Rec. ITU-T G.8261/Y.1361 (08/2013), Section 9.2.1.1, Table 5, pp 21
tdevNs = tsca.Mask([ ([0.1, 17.14], [12]),
([17.14, 100], [0, 0.7]),
([100, 1e6], ([58, 1.2, 3e-4], [0, 0.5, 1])) ])
| gpl-3.0 | 6,965,722,777,181,384,000 | 34.771429 | 70 | 0.618211 | false |
oemof/oemof_base | src/oemof/solph/groupings.py | 1 | 2875 | # -*- coding: utf-8 -*-
"""Groupings needed on an energy system for it to work with solph.
If you want to use solph on an energy system, you need to create it with these
groupings specified like this:
.. code-block:: python
from oemof.network import EnergySystem
import solph
energy_system = EnergySystem(groupings=solph.GROUPINGS)
SPDX-FileCopyrightText: Uwe Krien <[email protected]>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: Stephan Günther
SPDX-License-Identifier: MIT
"""
from oemof.network import groupings as groupings
from oemof.solph import blocks
def constraint_grouping(node, fallback=lambda *xs, **ks: None):
"""Grouping function for constraints.
This function can be passed in a list to :attr:`groupings` of
:class:`oemof.solph.network.EnergySystem`.
Parameters
----------
node : :class:`Node <oemof.network.Node`
The node for which the figure out a constraint group.
fallback : callable, optional
A function of one argument. If `node` doesn't have a `constraint_group`
attribute, this is used to group the node instead. Defaults to not
group the node at all.
"""
# TODO: Refactor this for looser coupling between modules.
# This code causes an unwanted tight coupling between the `groupings` and
# `network` modules, resulting in having to do an import at runtime in the
# init method of solph's `EnergySystem`. A better way would be to add a
# method (maybe `constraints`, `constraint_group`, `constraint_type` or
# something like that) to solph's node hierarchy, which gets overridden in
# each subclass to return the appropriate value. Then we can just call the
# method here.
# This even gives other users/us the ability to customize/extend how
# constraints are grouped by overriding the method in future subclasses.
cg = getattr(node, "constraint_group", fallback)
return cg()
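# Group every (source, target, flow) tuple under the generic Flow block; the groupings below additionally collect flows with investment or nonconvex settings.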
standard_flow_grouping = groupings.FlowsWithNodes(constant_key=blocks.Flow)
def _investment_grouping(stf):
if hasattr(stf[2], "investment"):
if stf[2].investment is not None:
return True
else:
return False
investment_flow_grouping = groupings.FlowsWithNodes(
constant_key=blocks.InvestmentFlow,
# stf: a tuple consisting of (source, target, flow), so stf[2] is the flow.
filter=_investment_grouping,
)
def _nonconvex_grouping(stf):
if hasattr(stf[2], "nonconvex"):
if stf[2].nonconvex is not None:
return True
else:
return False
nonconvex_flow_grouping = groupings.FlowsWithNodes(
constant_key=blocks.NonConvexFlow, filter=_nonconvex_grouping
)
GROUPINGS = [
constraint_grouping,
investment_flow_grouping,
standard_flow_grouping,
nonconvex_flow_grouping,
]
| gpl-3.0 | 3,557,383,444,322,008,600 | 29.252632 | 79 | 0.707376 | false |
NIRALUser/RodentThickness | Applications/RodentThickness/Testing/Data/vtkPointAttributes.py | 2 | 3379 |
import sys
# path="/tools/Python/Python-2.7.3/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg:/tools/Python/Python-2.7.3/lib/python2.7/site-packages/pip-1.0-py2.7.egg:/tools/Python/Python-2.7.3/lib/python2.7/site-packages/VTK-6.0-py2.7.egg:/tools/Python/Python-2.7.3/lib/python2.7/site-packages/distribute-0.6.28-py2.7.egg:/tools/Python/Python-2.7.3/lib/python2.7/site-packages/SimpleITK-0.6.0.dev208-py2.7.egg:/tools/Python/Python-2.7.3/lib/python27.zip:/tools/Python/Python-2.7.3/lib/python2.7:/tools/Python/Python-2.7.3/lib/python2.7/plat-linux2:/tools/Python/Python-2.7.3/lib/python2.7/lib-tk:/tools/Python/Python-2.7.3/lib/python2.7/lib-old:/tools/Python/Python-2.7.3/lib/python2.7/lib-dynload:/tools/Python/Python-2.7.3/lib/python2.7/site-packages".split(":")
# for p in path:
# sys.path.insert(0, p)
from vtk import *
from optparse import OptionParser
import csv,sys
import niralvtk as nv
import numpy as np
def removeAttributes(opts, args):
print args
inputvtk = args[0]
outputvtk = args[1]
inVTK = nv.readVTK(inputvtk);
nArrays = inVTK.GetPointData().GetNumberOfArrays()
arrayNames = []
for k in range(0,nArrays):
arrayNames.append(inVTK.GetPointData().GetArrayName(k))
print arrayNames
for name in arrayNames:
inVTK.GetPointData().RemoveArray(name)
nv.writeVTK(outputvtk, inVTK)
def main(opts, argv):
inputvtk = argv[0]
outputvtk = argv[1]
inVTK = nv.readVTK(inputvtk);
if (opts.sep == "0"):
csvreader = csv.reader(open(opts.csvfile, "r"))
elif (opts.sep == "1"):
csvreader = csv.reader(open(opts.csvfile, "r"), delimiter=',')
elif (opts.sep == "2"):
csvreader = csv.reader(open(opts.csvfile, "r"), delimiter='\t')
first = csvreader.next()
if (opts.header):
header = first
first = csvreader.next()
if (opts.names != ""):
header = opts.names.split(",")
print header
nCols = len(first)
nPoints = inVTK.GetNumberOfPoints()
data = np.zeros([nPoints,nCols])
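    # the first CSV row was already consumed above (header handling), so store it as point 0 before reading the remaining rows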
for k in range(0,nCols):
data[0,k] = float(first[k])
print "# points:", nPoints
for j in range(1,nPoints):
print j
first = csvreader.next()
for k in range(0,nCols):
data[j,k] = float(first[k])
for k in range(0,nCols):
arr = vtkDoubleArray()
if (len(header) > 0):
arr.SetName(header[k])
arr.SetNumberOfTuples(nPoints)
for j in range(0,nPoints):
arr.SetValue(j,data[j,k])
inVTK.GetPointData().AddArray(arr)
nv.writeVTK(outputvtk, inVTK)
if (__name__ == "__main__"):
parser = OptionParser(usage="usage: %prog [options] input-vtk output-vtk")
parser.add_option("-i", "--input", dest="csvfile", help="input attribute csv file", metavar="CSVFILE")
parser.add_option("-t", "--title", dest="header", help="use first line as header", action="store_true", default=False);
parser.add_option("-n", "--columnNames", dest="names", help="use this as column names", metavar="NAME1,NAME2,...", default="");
parser.add_option("-r", "--removeAttributes", dest="removeAttrs", help="remove all attributes", action="store_true", default=False);
parser.add_option("-s", "--separator", dest="sep", help="separator (0=space, 1=comma, 2=tab)", default="0")
(opts, args) = parser.parse_args()
if (len(args) < 2):
parser.print_help()
else:
if (opts.removeAttrs):
removeAttributes(opts, args)
else:
main(opts, args)
| gpl-3.0 | 4,282,565,386,402,492,400 | 33.479592 | 762 | 0.669725 | false |
imsut/commons | src/python/twitter/pants/targets/jvm_target.py | 1 | 2954 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
from twitter.pants.targets.internal import InternalTarget
from twitter.pants.targets.jar_dependency import JarDependency
from twitter.pants.targets.with_sources import TargetWithSources
class JvmTarget(InternalTarget, TargetWithSources):
"""A base class for all java module targets that provides path and dependency translation."""
def __init__(self, name, sources, dependencies,
excludes=None,
buildflags=None,
is_meta=False,
configurations=None):
InternalTarget.__init__(self, name, dependencies, is_meta)
TargetWithSources.__init__(self, name, is_meta)
self.declared_dependencies = set(dependencies or [])
self.add_label('jvm')
self.sources = self._resolve_paths(self.target_base, sources) or []
for source in self.sources:
rel_path = os.path.join(self.target_base, source)
TargetWithSources.register_source(rel_path, self)
self.excludes = excludes or []
self.buildflags = buildflags or []
custom_antxml = '%s.xml' % self.name
buildfile = self.address.buildfile.full_path
custom_antxml_path = os.path.join(os.path.dirname(buildfile), custom_antxml)
self.custom_antxml_path = custom_antxml_path if os.path.exists(custom_antxml_path) else None
self.configurations = configurations
def _as_jar_dependency(self):
jar_dependency, _, _ = self._get_artifact_info()
jar = JarDependency(org = jar_dependency.org, name = jar_dependency.name, rev = None)
jar.id = self.id
return jar
def _as_jar_dependencies(self):
yield self._as_jar_dependency()
def _get_artifact_info(self):
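    # exported targets (those with a 'provides' artifact) use their published coordinates; internal targets get synthetic 'internal' coordinates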
provides = self._provides()
exported = bool(provides)
org = provides.org if exported else 'internal'
module = provides.name if exported else self.id
version = provides.rev if exported else None
id = "%s-%s" % (provides.org, provides.name) if exported else self.id
return JarDependency(org = org, name = module, rev = version), id, exported
def _provides(self):
return None
| apache-2.0 | -2,108,860,342,207,043,300 | 40.027778 | 100 | 0.636425 | false |
ver228/tierpsy-tracker | tierpsy/helper/misc/file_processing.py | 1 | 2071 | import os
import tables
from .misc import TABLE_FILTERS
RESERVED_EXT = ['_skeletons.hdf5',
'_trajectories.hdf5',
'_features.hdf5',
'_intensities.hdf5',
'_feat_manual.hdf5',
'_subsample.avi',
'.wcon.zip',
'_featuresN.hdf5'
]
IMG_EXT = ['.png', '.jpeg', '.jpg', '.tif', '.tiff', '.bmp']
def remove_ext(fname):
for rext in RESERVED_EXT:
if fname.endswith(rext):
return fname.replace(rext, '')
return os.path.splitext(fname)[0]
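# e.g. remove_ext('video_skeletons.hdf5') -> 'video'; names without a reserved suffix fall back to os.path.splitext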
def get_base_name(fname):
return os.path.basename(remove_ext(fname))
def replace_subdir(original_dir, original_subdir, new_subdir):
# construct the results dir on base of the mask_dir_root
original_dir = os.path.normpath(original_dir)
subdir_list = original_dir.split(os.sep)
for ii in range(len(subdir_list))[::-1]:
if subdir_list[ii] == original_subdir:
subdir_list[ii] = new_subdir
break
# the counter arrived to zero, add new_subdir at the end of the directory
if ii == 0:
if subdir_list[-1] == '':
del subdir_list[-1]
subdir_list.append(new_subdir)
return (os.sep).join(subdir_list)
def save_modified_table(file_name, modified_table, table_name):
tab_recarray = modified_table.to_records(index=False)
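    # write the data into a temporary node, copy the user attributes from the old table, then remove the old table and rename the new one in its place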
with tables.File(file_name, "r+") as fid:
dum_name = table_name + '_d'
if '/' + dum_name in fid:
fid.remove_node('/', dum_name)
newT = fid.create_table(
'/',
dum_name,
obj=tab_recarray,
filters=TABLE_FILTERS)
oldT = fid.get_node('/' + table_name)
old_args = [x for x in dir(oldT._v_attrs) if not x.startswith('_')]
for key in old_args:
if not key in newT._v_attrs and not key.startswith('FIELD'):
newT.attrs[key] = oldT.attrs[key]
fid.remove_node('/', table_name)
newT.rename(table_name)
| mit | -487,200,462,895,574,000 | 30.861538 | 77 | 0.551424 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/config/launch_best.py | 1 | 1734 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Create the configuration
definition = ConfigurationDefinition()
definition.add_section("wavelengths")
definition.sections["wavelengths"].add_optional("unit", str, "the unit of the wavelengths", "micron")
definition.sections["wavelengths"].add_optional("min", float, "the minimum wavelength", 0.05)
definition.sections["wavelengths"].add_optional("max", float, "the maximum wavelength", 1000)
definition.sections["wavelengths"].add_optional("npoints", int, "the number of wavelength points", 300)
definition.sections["wavelengths"].add_optional("min_zoom", float, "the minium wavelength of the zoomed-in grid", 1)
definition.sections["wavelengths"].add_optional("max_zoom", float, "the maximum wavelength of the zoomed-in grid", 30)
definition.sections["wavelengths"].add_optional("npoints_zoom", int, "the number of wavelength points in the zoomed-in grid", 300)
definition.add_optional("packages", float, "the number of photon packages per wavelength", 1e6)
definition.add_optional("selfabsorption", bool, "whether self-absorption should be enabled", True)
definition.add_optional("remote", str, "the remote host on which to launch the simulations", "nancy")
# -----------------------------------------------------------------
| mit | -1,689,315,003,347,024,400 | 58.758621 | 130 | 0.629544 | false |
Jarn/jarn.mkrelease | jarn/mkrelease/tests/test_git.py | 1 | 18107 | import unittest
import os
from os.path import join, isdir
from jarn.mkrelease.scm import Git
from jarn.mkrelease.process import Process
from jarn.mkrelease.testing import GitSetup
from jarn.mkrelease.testing import MockProcess
from jarn.mkrelease.testing import quiet
class ValidUrlTests(unittest.TestCase):
def testGitUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('git://'), True)
def testRsyncUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('rsync://'), True)
def testSshUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('ssh://'), True)
def testHttpUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('http://'), True)
def testHttpsUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('https://'), True)
def testFileUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('file://'), True)
def testBadProtocol(self):
scm = Git()
self.assertEqual(scm.is_valid_url('svn://'), False)
def testEmptyString(self):
scm = Git()
self.assertEqual(scm.is_valid_url(''), False)
def testGitSshUrl(self):
scm = Git()
self.assertEqual(scm.is_valid_url('[email protected]:Jarn/jarn.mkrelease'), True)
class ValidSandboxTests(GitSetup):
def testSandbox(self):
scm = Git()
self.assertEqual(scm.is_valid_sandbox(self.packagedir), True)
def testSubdirOfSandbox(self):
scm = Git()
self.assertEqual(scm.is_valid_sandbox(join(self.packagedir, 'testpackage')), True)
def testNotExists(self):
scm = Git()
self.assertEqual(scm.is_valid_sandbox('foo'), False)
def testNotADir(self):
scm = Git()
self.assertEqual(scm.is_valid_sandbox(join(self.packagedir, 'setup.py')), False)
def testNotACheckout(self):
scm = Git()
self.destroy()
self.assertEqual(scm.is_valid_sandbox(self.packagedir), False)
@quiet
def testCheckRaises(self):
scm = Git()
self.assertRaises(SystemExit, scm.check_valid_sandbox, 'foo')
self.assertRaises(SystemExit, scm.check_valid_sandbox, join(self.packagedir, 'setup.py'))
self.destroy()
self.assertRaises(SystemExit, scm.check_valid_sandbox, self.packagedir)
class RootFromSandboxTests(GitSetup):
def testGetRoot(self):
scm = Git()
self.assertEqual(scm.get_root_from_sandbox(self.packagedir),
self.packagedir)
def testGetSubfolderRoot(self):
scm = Git()
self.assertEqual(scm.get_root_from_sandbox(join(self.packagedir, 'testpackage')),
self.packagedir)
def testGetCloneRoot(self):
scm = Git()
self.clone()
self.assertEqual(scm.get_root_from_sandbox(self.clonedir),
self.clonedir)
def testGetCloneSubfolderRoot(self):
scm = Git()
self.clone()
self.assertEqual(scm.get_root_from_sandbox(join(self.clonedir, 'testpackage')),
self.clonedir)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.get_root_from_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.get_root_from_sandbox, self.packagedir)
class BranchFromSandboxTests(GitSetup):
def testGetLocalBranch(self):
scm = Git()
self.assertEqual(scm.get_branch_from_sandbox(self.packagedir), 'master')
def testGetLocalBranchFromBranch(self):
scm = Git()
self.branch(self.packagedir, '2.x')
self.assertEqual(scm.get_branch_from_sandbox(self.packagedir), '2.x')
def testGetRemoteBranch(self):
scm = Git()
self.clone()
self.assertEqual(scm.get_branch_from_sandbox(self.clonedir), 'master')
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.get_branch_from_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.get_branch_from_sandbox, self.packagedir)
class RemoteFromSandboxTests(GitSetup):
def testGetLocal(self):
scm = Git()
self.assertEqual(scm.get_remote_from_sandbox(self.packagedir), '')
def testGetRemote(self):
scm = Git()
self.clone()
self.assertEqual(scm.get_remote_from_sandbox(self.clonedir), 'origin')
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.get_remote_from_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.get_remote_from_sandbox, self.packagedir)
@quiet
def testWhitebox(self):
def func(cmd):
if cmd == 'git branch':
return 0, ['* master']
return 1, []
scm = Git(MockProcess(func=func))
self.assertRaises(SystemExit, scm.get_remote_from_sandbox, self.packagedir)
class TrackedBranchFromSandboxTests(GitSetup):
def testGetLocal(self):
scm = Git()
self.assertEqual(scm.get_tracked_branch_from_sandbox(self.packagedir), '')
def testGetRemote(self):
scm = Git()
self.clone()
self.assertEqual(scm.get_tracked_branch_from_sandbox(self.clonedir), 'master')
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.get_tracked_branch_from_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.get_tracked_branch_from_sandbox, self.packagedir)
@quiet
def testWhitebox(self):
def func(cmd):
if cmd == 'git branch':
return 0, ['* master']
return 1, []
scm = Git(MockProcess(func=func))
self.assertRaises(SystemExit, scm.get_tracked_branch_from_sandbox, self.packagedir)
class UrlFromSandboxTests(GitSetup):
def testGetLocalUrl(self):
scm = Git()
self.assertEqual(scm.get_url_from_sandbox(self.packagedir), '')
def testGetRemoteUrl(self):
scm = Git()
self.clone()
self.assertEqual(scm.get_url_from_sandbox(self.clonedir), self.packagedir)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.get_url_from_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.get_url_from_sandbox, self.packagedir)
@quiet
def testWhitebox(self):
self.called = 0
def func(cmd):
if cmd == 'git branch':
return 0, ['* master']
if cmd == 'git config -l':
self.called += 1
if self.called == 1:
return 0, ['branch.master.remote=origin']
return 1, []
scm = Git(MockProcess(func=func))
self.assertRaises(SystemExit, scm.get_url_from_sandbox, self.packagedir)
class RemoteSandboxTests(GitSetup):
def testIsLocal(self):
scm = Git()
self.assertEqual(scm.is_remote_sandbox(self.packagedir), False)
def testIsRemote(self):
scm = Git()
self.clone()
self.assertEqual(scm.is_remote_sandbox(self.clonedir), True)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.is_remote_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.is_remote_sandbox, self.packagedir)
class DirtySandboxTests(GitSetup):
def testCleanSandbox(self):
scm = Git()
self.assertEqual(scm.is_dirty_sandbox(self.packagedir), False)
def testModifiedFile(self):
scm = Git()
self.modify(self.packagedir)
self.assertEqual(scm.is_dirty_sandbox(self.packagedir), True)
def testRemovedFile(self):
scm = Git()
self.remove(self.packagedir)
self.assertEqual(scm.is_dirty_sandbox(self.packagedir), True)
def testDeletedButTrackedFile(self):
scm = Git()
self.delete(self.packagedir)
# Note: The sandbox is reported as *dirty*
self.assertEqual(scm.is_dirty_sandbox(self.packagedir), True)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.is_dirty_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=128))
self.assertRaises(SystemExit, scm.is_dirty_sandbox, self.packagedir)
@quiet
def testCheckRaises(self):
scm = Git()
self.modify(self.packagedir)
self.assertRaises(SystemExit, scm.check_dirty_sandbox, self.packagedir)
class UncleanSandboxTests(GitSetup):
def testCleanSandbox(self):
scm = Git()
self.assertEqual(scm.is_unclean_sandbox(self.packagedir), False)
def testModifiedFile(self):
scm = Git()
self.modify(self.packagedir)
self.assertEqual(scm.is_unclean_sandbox(self.packagedir), True)
def testRemovedFile(self):
scm = Git()
self.remove(self.packagedir)
self.assertEqual(scm.is_unclean_sandbox(self.packagedir), True)
def testDeletedButTrackedFile(self):
scm = Git()
self.delete(self.packagedir)
# Note: The sandbox is reported as unclean
self.assertEqual(scm.is_unclean_sandbox(self.packagedir), True)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.is_unclean_sandbox, self.packagedir)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=128))
self.assertRaises(SystemExit, scm.is_unclean_sandbox, self.packagedir)
@quiet
def testCheckRaises(self):
scm = Git()
self.modify(self.packagedir)
self.assertRaises(SystemExit, scm.check_unclean_sandbox, self.packagedir)
class CommitSandboxTests(GitSetup):
def testCommitCleanSandbox(self):
scm = Git(Process(quiet=True))
self.assertEqual(scm.commit_sandbox(self.packagedir, 'testpackage', '2.6', False), 0)
def testCommitDirtySandbox(self):
scm = Git(Process(quiet=True))
self.modify(self.packagedir)
self.assertEqual(scm.commit_sandbox(self.packagedir, 'testpackage', '2.6', False), 0)
@quiet
def testCommitAndPushCleanLocalSandbox(self):
scm = Git(Process(quiet=True))
self.assertEqual(scm.commit_sandbox(self.packagedir, 'testpackage', '2.6', True), 0)
@quiet
def testCommitAndPushDirtyLocalSandbox(self):
scm = Git(Process(quiet=True))
self.modify(self.packagedir)
self.assertEqual(scm.commit_sandbox(self.packagedir, 'testpackage', '2.6', True), 0)
def testCommitAndPushCleanRemoteSandbox(self):
scm = Git(Process(quiet=True))
self.clone()
self.assertEqual(scm.commit_sandbox(self.clonedir, 'testpackage', '2.6', True), 0)
def testCommitAndPushDirtyRemoteSandbox(self):
scm = Git(Process(quiet=True))
self.clone()
self.modify(self.clonedir)
self.assertEqual(scm.commit_sandbox(self.clonedir, 'testpackage', '2.6', True), 0)
self.verify(self.clonedir)
self.update(self.packagedir)
self.verify(self.packagedir)
@quiet
def testBadPush(self):
scm = Git(Process(quiet=True))
self.clone()
self.destroy()
self.assertRaises(SystemExit, scm.commit_sandbox, self.clonedir, 'testpackage', '2.6', True)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.commit_sandbox, self.packagedir, 'testpackage', '2.6', False)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=255))
self.assertRaises(SystemExit, scm.commit_sandbox, self.packagedir, 'testpackage', '2.6', False)
class CloneUrlTests(GitSetup):
def testCloneUrl(self):
scm = Git(Process(quiet=True))
self.assertEqual(scm.clone_url(self.packagedir, 'testclone'), 0)
self.assertEqual(isdir('testclone'), True)
@quiet
def testBadServer(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.clone_url, self.packagedir, 'testclone')
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.clone_url, self.packagedir, 'testclone')
class BranchIdTests(GitSetup):
def testMakeBranchId(self):
scm = Git()
self.assertEqual(scm.make_branchid(self.packagedir, '2.x'), '2.x')
def testEmptyBranchId(self):
scm = Git()
self.assertEqual(scm.make_branchid(self.packagedir, ''), 'master')
class SwitchBranchTests(GitSetup):
@quiet
def testSwitchBranch(self):
scm = Git(Process(quiet=True))
self.branch(self.packagedir, '2.x')
self.assertEqual(scm.get_branch_from_sandbox(self.packagedir), '2.x')
self.assertEqual(scm.switch_branch(self.packagedir, 'master'), 0)
self.assertEqual(scm.get_branch_from_sandbox(self.packagedir), 'master')
@quiet
def testSwitchSameBranch(self):
scm = Git()
self.assertEqual(scm.get_branch_from_sandbox(self.packagedir), 'master')
self.assertEqual(scm.switch_branch(self.packagedir, 'master'), 0)
self.assertEqual(scm.get_branch_from_sandbox(self.packagedir), 'master')
@quiet
def testSwitchRemoteBranch(self):
scm = Git(Process(quiet=True))
self.branch(self.packagedir, '2.x')
self.clone()
self.assertEqual(scm.get_branch_from_sandbox(self.clonedir), 'master')
self.assertEqual(scm.switch_branch(self.clonedir, '2.x'), 0)
self.assertEqual(scm.get_branch_from_sandbox(self.clonedir), '2.x')
@quiet
def testSwitchUnknownBranch(self):
scm = Git(Process(quiet=True))
self.assertRaises(SystemExit, scm.switch_branch, self.packagedir, '2.x')
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.switch_branch, self.packagedir, 'master')
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.switch_branch, self.packagedir, 'master')
class TagExistsTests(GitSetup):
def testTagDoesNotExist(self):
scm = Git()
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), False)
def testTagExists(self):
scm = Git()
self.tag(self.packagedir, '2.6')
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), True)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.check_tag_exists, self.packagedir, '2.6')
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.check_tag_exists, self.packagedir, '2.6')
@quiet
def testCheckRaises(self):
scm = Git()
self.tag(self.packagedir, '2.6')
self.assertRaises(SystemExit, scm.check_tag_exists, self.packagedir, '2.6')
class CreateTagTests(GitSetup):
def testCreateTag(self):
scm = Git()
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), False)
self.assertEqual(scm.create_tag(self.packagedir, '2.6', 'testpackage', '2.6', False), 0)
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), True)
@quiet
def testCreateExistingTag(self):
scm = Git(Process(quiet=True))
self.assertEqual(scm.create_tag(self.packagedir, '2.6', 'testpackage', '2.6', False), 0)
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), True)
self.assertRaises(SystemExit, scm.create_tag, self.packagedir, '2.6', 'testpackage', '2.6', False)
@quiet
def testCreateAndPushLocalTag(self):
scm = Git()
self.assertEqual(scm.create_tag(self.packagedir, '2.6', 'testpackage', '2.6', True), 0)
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), True)
def testCreateAndPushRemoteTag(self):
scm = Git(Process(quiet=True))
self.clone()
self.assertEqual(scm.create_tag(self.clonedir, '2.6', 'testpackage', '2.6', True), 0)
self.assertEqual(scm.tag_exists(self.clonedir, '2.6'), True)
self.assertEqual(scm.tag_exists(self.packagedir, '2.6'), True)
@quiet
def testBadPush(self):
scm = Git(Process(quiet=True))
self.clone()
self.destroy()
self.assertRaises(SystemExit, scm.create_tag, self.packagedir, '2.6', 'testpackage', '2.6', True)
@quiet
def testBadSandbox(self):
scm = Git(Process(quiet=True))
self.destroy()
self.assertRaises(SystemExit, scm.create_tag, self.packagedir, '2.6', 'testpackage', '2.6', False)
@quiet
def testBadProcess(self):
scm = Git(MockProcess(rc=1))
self.assertRaises(SystemExit, scm.create_tag, self.packagedir, '2.6', 'testpackage', '2.6', False)
class GetVersionTests(unittest.TestCase):
def testGetVersion(self):
scm = Git()
self.assertNotEqual(scm.get_version(), '')
def testVersionInfo(self):
scm = Git()
self.assertNotEqual(scm.version_info, ())
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| bsd-2-clause | -2,791,792,132,245,524,000 | 30.490435 | 106 | 0.638317 | false |