# ramrom/haus :: gmail.py (license: mit)
#!/usr/local/bin/python
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = '/Users/smittapalli/.creds/gcloud_oauth2_webapp_haus.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.creds')
credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
"""Shows basic usage of the Gmail API.
Creates a Gmail API service object and outputs a list of label names
of the user's Gmail account.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
results = service.users().labels().list(userId='me').execute()
labels = results.get('labels', [])
if not labels:
print('No labels found.')
else:
print('Labels:')
for label in labels:
print(label['name'])
if __name__ == '__main__':
    main()
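
# Illustrative sketch (not part of the original script): the same authorized
# `service` object can also list message IDs through the Gmail v1 messages
# endpoint. `max_results` is an arbitrary example value.
#
#     def list_recent_message_ids(service, max_results=5):
#         """Return the IDs of up to `max_results` recent messages."""
#         response = service.users().messages().list(
#             userId='me', maxResults=max_results).execute()
#         return [msg['id'] for msg in response.get('messages', [])]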

# ESOedX/edx-platform :: common/test/acceptance/tests/lms/test_problem_types.py
"""
Bok choy acceptance and a11y tests for problem types in the LMS
"""
from __future__ import absolute_import
import random
import textwrap
from abc import ABCMeta, abstractmethod
import ddt
import pytest
import six
from bok_choy.promise import BrokenPromise
from selenium.webdriver import ActionChains
from six.moves import range
from capa.tests.response_xml_factory import (
AnnotationResponseXMLFactory,
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
JSInputXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
StringResponseXMLFactory,
SymbolicResponseXMLFactory
)
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.tests.helpers import EventsTestMixin, select_option_by_text
from common.test.acceptance.tests.lms.test_lms_problems import ProblemsTest
from openedx.core.lib.tests import attr
class ProblemTypeTestBaseMeta(ABCMeta):
"""
MetaClass for ProblemTypeTestBase to ensure that the required attributes
are defined in the inheriting classes.
"""
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
required_attrs = [
'problem_name',
'problem_type',
'factory',
'factory_kwargs',
'status_indicators',
]
for required_attr in required_attrs:
msg = (u'{} is a required attribute for {}').format(
required_attr, str(cls)
)
try:
if obj.__getattribute__(required_attr) is None:
raise NotImplementedError(msg)
except AttributeError:
raise NotImplementedError(msg)
return obj
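
# Illustrative sketch (not in the original module): the metaclass above makes
# instantiation fail fast when a required attribute is missing. For example, a
# hypothetical subclass that never assigns `factory` cannot be instantiated:
#
#     class BrokenProblemTest(ProblemTypeTestBase):
#         problem_name = 'BROKEN'
#         problem_type = 'broken'
#         # `factory` is still None, so ProblemTypeTestBaseMeta.__call__
#         # raises NotImplementedError('factory is a required attribute ...')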
class ProblemTypeTestBase(six.with_metaclass(ProblemTypeTestBaseMeta, ProblemsTest, EventsTestMixin)):
"""
    Base class for testing assessment problem types in bok choy.
This inherits from ProblemsTest, which has capabilities for testing problem
features that are not problem type specific (checking, hinting, etc.).
The following attributes must be explicitly defined when inheriting from
this class:
problem_name (str)
problem_type (str)
factory (ResponseXMLFactory subclass instance)
Additionally, the default values for factory_kwargs and status_indicators
may need to be overridden for some problem types.
"""
problem_name = None
problem_type = None
problem_points = 1
factory = None
factory_kwargs = {}
status_indicators = {
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered'],
'submitted': ['span.submitted'],
'unsubmitted': ['.unsubmitted']
}
def setUp(self):
"""
Visits courseware_page and defines self.problem_page.
"""
super(ProblemTypeTestBase, self).setUp()
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser)
def get_sequential(self):
""" Allow any class in the inheritance chain to customize subsection metadata."""
return XBlockFixtureDesc('sequential', 'Test Subsection', metadata=getattr(self, 'sequential_metadata', {}))
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'always', 'show_reset_button': True}
)
def wait_for_status(self, status):
"""
Waits for the expected status indicator.
Args:
status: one of ("correct", "incorrect", "unanswered", "submitted")
"""
msg = u"Wait for status to be {}".format(status)
selector = ', '.join(self.status_indicators[status])
self.problem_page.wait_for_element_visibility(selector, msg)
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
@abstractmethod
def answer_problem(self, correctness):
"""
Args:
            correctness (str): one of 'correct', 'incorrect' or
                'partially-correct'; an answer of that correctness is input.
"""
raise NotImplementedError()
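
    # Illustrative sketch (not in the original module): concrete subclasses
    # implement this hook against the page object, along the lines of the
    # (hypothetical) example below; real implementations follow further down.
    #
    #     def answer_problem(self, correctness):
    #         choice = 'choice_0' if correctness == 'correct' else 'choice_1'
    #         self.problem_page.click_choice(choice)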
class ProblemTypeA11yTestMixin(object):
"""
Shared a11y tests for all problem types.
"""
@attr('a11y')
def test_problem_type_a11y(self):
"""
Run accessibility audit for the problem type.
"""
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Set the scope to the problem container
self.problem_page.a11y_audit.config.set_scope(
include=['div#seq_content']
)
# Run the accessibility audit.
self.problem_page.a11y_audit.check_for_accessibility_errors()
@ddt.ddt
class ProblemTypeTestMixin(ProblemTypeA11yTestMixin):
"""
Test cases shared amongst problem types.
"""
can_submit_blank = False
can_update_save_notification = True
@attr(shard=11)
def test_answer_correctly(self):
"""
Scenario: I can answer a problem correctly
Given External graders respond "correct"
And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then my "<ProblemType>" answer is marked "correct"
And The "<ProblemType>" problem displays a "correct" answer
And a success notification is shown
And clicking on "Review" moves focus to the problem meta area
And a "problem_check" server event is emitted
And a "problem_check" browser event is emitted
"""
# Make sure we're looking at the right problem
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Answer the problem correctly
self.answer_problem(correctness='correct')
self.problem_page.click_submit()
self.wait_for_status('correct')
self.problem_page.wait_success_notification()
# Check that clicking on "Review" goes to the problem meta location
self.problem_page.click_review_in_notification(notification_type='submit')
self.problem_page.wait_for_focus_on_problem_meta()
# Check for corresponding tracking event
expected_events = [
{
'event_source': 'server',
'event_type': 'problem_check',
'username': self.username,
}, {
'event_source': 'browser',
'event_type': 'problem_check',
'username': self.username,
},
]
for event in expected_events:
self.wait_for_events(event_filter=event, number_of_matches=1)
@attr(shard=11)
def test_answer_incorrectly(self):
"""
Scenario: I can answer a problem incorrectly
Given External graders respond "incorrect"
And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then my "<ProblemType>" answer is marked "incorrect"
        And The "<ProblemType>" problem displays an "incorrect" answer
"""
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Answer the problem incorrectly
self.answer_problem(correctness='incorrect')
self.problem_page.click_submit()
self.wait_for_status('incorrect')
self.problem_page.wait_incorrect_notification()
@attr(shard=11)
def test_submit_blank_answer(self):
"""
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
When I submit a problem
Then my "<ProblemType>" answer is marked "incorrect"
And The "<ProblemType>" problem displays a "blank" answer
"""
if not self.can_submit_blank:
pytest.skip("Test incompatible with the current problem type")
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
        # Leave the problem unchanged and ensure the submit button is enabled.
self.wait_for_status('unanswered')
self.assertFalse(self.problem_page.is_submit_disabled())
self.problem_page.click_submit()
self.wait_for_status('incorrect')
@attr(shard=11)
def test_cant_submit_blank_answer(self):
"""
Scenario: I can't submit a blank answer
When I try to submit blank answer
Then I can't submit a problem
"""
if self.can_submit_blank:
pytest.skip("Test incompatible with the current problem type")
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
self.assertTrue(self.problem_page.is_submit_disabled())
@attr(shard=12)
def test_can_show_answer(self):
"""
Scenario: Verifies that show answer button is working as expected.
Given that I am on courseware page
And I can see a CAPA problem with show answer button
When I click "Show Answer" button
        And I should see the question's solution
And I should see the problem title is focused
"""
self.problem_page.click_show()
self.problem_page.wait_for_show_answer_notification()
@attr(shard=12)
def test_save_reaction(self):
"""
Scenario: Verify that the save button performs as expected with problem types
Given that I am on a problem page
And I can see a CAPA problem with the Save button present
        When I select an answer and click the "Save" button
Then I should see the Save notification
And the Save button should not be disabled
And clicking on "Review" moves focus to the problem meta area
And if I change the answer selected
Then the Save notification should be removed
"""
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
self.problem_page.wait_for_page()
self.answer_problem(correctness='correct')
self.assertTrue(self.problem_page.is_save_button_enabled())
self.problem_page.click_save()
# Ensure "Save" button is enabled after save is complete.
self.assertTrue(self.problem_page.is_save_button_enabled())
self.problem_page.wait_for_save_notification()
# Check that clicking on "Review" goes to the problem meta location
self.problem_page.click_review_in_notification(notification_type='save')
self.problem_page.wait_for_focus_on_problem_meta()
# Not all problems will detect the change and remove the save notification
if self.can_update_save_notification:
self.answer_problem(correctness='incorrect')
self.assertFalse(self.problem_page.is_save_notification_visible())
@attr(shard=12)
def test_reset_shows_errors(self):
"""
Scenario: Reset will show server errors
If I reset a problem without first answering it
Then a "gentle notification" is shown
And the focus moves to the "gentle notification"
"""
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
self.wait_for_status('unanswered')
self.assertFalse(self.problem_page.is_gentle_alert_notification_visible())
# Click reset without first answering the problem (possible because show_reset_button is set to True)
self.problem_page.click_reset()
self.problem_page.wait_for_gentle_alert_notification()
@attr(shard=12)
def test_partially_complete_notifications(self):
"""
Scenario: If a partially correct problem is submitted the correct notification is shown
If I submit an answer that is partially correct
Then the partially correct notification should be shown
"""
# Not all problems have partially correct solutions configured
if not self.partially_correct:
pytest.skip("Test incompatible with the current problem type")
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
self.wait_for_status('unanswered')
# Set an answer
self.answer_problem(correctness='partially-correct')
self.problem_page.click_submit()
self.problem_page.wait_partial_notification()
@ddt.data('correct', 'incorrect')
def test_reset_problem(self, correctness):
"""
Scenario: I can reset a problem
Given I am viewing a problem with randomization: always and with reset button: on
And I answer a problem as <correctness>
When I reset the problem
Then my answer is marked "unanswered"
And The problem displays a "blank" answer
"""
self.answer_problem(correctness)
self.problem_page.click_submit()
self.problem_page.click_reset()
self.assertTrue(self.problem_status('unanswered'))
@ddt.ddt
class ChangingAnswerOfProblemTestMixin(object):
"""
Test the effect of changing the answers of problem
"""
@ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
@ddt.unpack
def test_checkbox_score_after_answer_and_reset(self, correctness, score):
"""
Scenario: I can see my score on problem when I answer it and after I reset it
        Given I am viewing a problem
When I answer problem with <correctness>
Then I should see a <score>
When I reset the problem
Then I should see a score of points possible: 0/1 point (ungraded)
"""
self.answer_problem(correctness)
self.problem_page.click_submit()
self.assertEqual(self.problem_page.problem_progress_graded_value, score)
self.problem_page.click_reset()
self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')
@ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
@ddt.unpack
def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
"""
Scenario: I can reset the correctness of a problem after changing my answer
        Given I am viewing a problem
Then my problem's answer is marked "unanswered"
When I answer and submit the problem with <initial correctness>
Then my problem's answer is marked with <initial correctness>
And I input an answer as <other correctness>
Then my problem's answer is marked "unanswered"
"""
self.assertTrue(self.problem_status('unanswered'))
self.answer_problem(initial_correctness)
self.problem_page.click_submit()
self.assertTrue(self.problem_status(initial_correctness))
self.answer_problem(other_correctness)
self.assertTrue(self.problem_status('unanswered'))
@ddt.ddt
class NonRandomizedProblemTypeTestMixin(ProblemTypeA11yTestMixin):
"""
Test the effect of 'randomization: never'
"""
can_submit_blank = False
can_update_save_notification = True
def test_non_randomized_problem_correctly(self):
"""
Scenario: The reset button doesn't show up
Given I am viewing a problem with "randomization": never and with "reset button": on
        And I answer the problem correctly
Then The "Reset" button does not appear
"""
self.answer_problem("correct")
self.problem_page.click_submit()
self.assertFalse(self.problem_page.is_reset_button_present())
def test_non_randomized_problem_incorrectly(self):
"""
Scenario: I can reset a non-randomized problem that I answered incorrectly
        Given I am viewing a problem with "randomization": never and with "reset button": on
And I answer problem incorrectly
When I reset the problem
        Then my problem's answer is marked "unanswered"
        And the problem displays a "blank" answer
"""
self.answer_problem("incorrect")
self.problem_page.click_submit()
self.problem_page.click_reset()
self.assertTrue(self.problem_status('unanswered'))
@ddt.ddt
class ProblemNeverShowCorrectnessMixin(object):
"""
Tests the effect of adding `show_correctness: never` to the sequence metadata
for subclasses of ProblemTypeTestMixin.
"""
sequential_metadata = {'show_correctness': 'never'}
@attr(shard=7)
@ddt.data('correct', 'incorrect', 'partially-correct')
def test_answer_says_submitted(self, correctness):
"""
Scenario: I can answer a problem <Correctness>ly
Given External graders respond "<Correctness>"
And I am viewing a "<ProblemType>" problem
in a subsection with show_correctness set to "never"
Then I should see a score of "N point(s) possible (ungraded, results hidden)"
When I answer a "<ProblemType>" problem "<Correctness>ly"
And the "<ProblemType>" problem displays only a "submitted" notification.
And I should see a score of "N point(s) possible (ungraded, results hidden)"
And a "problem_check" server event is emitted
And a "problem_check" browser event is emitted
"""
# Not all problems have partially correct solutions configured
if correctness == 'partially-correct' and not self.partially_correct:
pytest.skip("Test incompatible with the current problem type")
# Problem progress text depends on points possible
possible = 'possible (ungraded, results hidden)'
if self.problem_points == 1:
problem_progress = u'1 point {}'.format(possible)
else:
problem_progress = u'{} points {}'.format(self.problem_points, possible)
# Make sure we're looking at the right problem
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Learner can see that score will be hidden prior to submitting answer
self.assertEqual(self.problem_page.problem_progress_graded_value, problem_progress)
# Answer the problem correctly
self.answer_problem(correctness=correctness)
self.problem_page.click_submit()
self.wait_for_status('submitted')
self.problem_page.wait_submitted_notification()
# Score is still hidden after submitting answer
self.assertEqual(self.problem_page.problem_progress_graded_value, problem_progress)
# Check for corresponding tracking event
expected_events = [
{
'event_source': 'server',
'event_type': 'problem_check',
'username': self.username,
}, {
'event_source': 'browser',
'event_type': 'problem_check',
'username': self.username,
},
]
for event in expected_events:
self.wait_for_events(event_filter=event, number_of_matches=1)
class AnnotationProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Annotation Problem Type
"""
problem_name = 'ANNOTATION TEST PROBLEM'
problem_type = 'annotationresponse'
problem_points = 2
factory = AnnotationResponseXMLFactory()
partially_correct = True
can_submit_blank = True
can_update_save_notification = False
factory_kwargs = {
'title': 'Annotation Problem',
'text': 'The text being annotated',
        'comment': 'What do you think about this text?',
'comment_prompt': 'Type your answer below.',
'tag_prompt': 'Which of these items most applies to the text?',
'options': [
('dog', 'correct'),
('cat', 'incorrect'),
('fish', 'partially-correct'),
]
}
status_indicators = {
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'partially-correct': ['span.partially-correct'],
'unanswered': ['span.unanswered'],
'submitted': ['span.submitted'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for AnnotationProblemTypeBase
"""
super(AnnotationProblemTypeBase, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
'label', # TODO: AC-491
'label-title-only', # TODO: AC-493
]
})
def answer_problem(self, correctness):
"""
Answer annotation problem.
"""
if correctness == 'correct':
choice = 0
elif correctness == 'partially-correct':
choice = 2
else:
choice = 1
answer = 'Student comment'
self.problem_page.q(css='div.problem textarea.comment').fill(answer)
        self.problem_page.q(css='div.problem span.tag').nth(choice).click()
class AnnotationProblemTypeTest(AnnotationProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Annotation Problem Type
"""
shard = 24
class AnnotationProblemTypeNeverShowCorrectnessTest(AnnotationProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Annotation Problem Type problems.
"""
pass
class CheckboxProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization Checkbox Problem Type
"""
problem_name = 'CHECKBOX TEST PROBLEM'
problem_type = 'checkbox'
partially_correct = True
factory = ChoiceResponseXMLFactory()
factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and Choice 2; Choice 1 and Choice 3 together are incorrect.',
'choice_type': 'checkbox',
'credit_type': 'edc',
'choices': [True, False, True, False],
'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
'explanation_text': 'This is explanation text'
}
def answer_problem(self, correctness):
"""
Answer checkbox problem.
"""
if correctness == 'correct':
self.problem_page.click_choice("choice_0")
self.problem_page.click_choice("choice_2")
elif correctness == 'partially-correct':
self.problem_page.click_choice("choice_2")
else:
self.problem_page.click_choice("choice_1")
self.problem_page.click_choice("choice_3")
@ddt.ddt
class CheckboxProblemTypeTest(CheckboxProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
"""
Standard tests for the Checkbox Problem Type
"""
shard = 24
def test_can_show_answer(self):
"""
Scenario: Verifies that show answer button is working as expected.
Given that I am on courseware page
And I can see a CAPA problem with show answer button
When I click "Show Answer" button
        And I should see the question's solution
And I should see correct choices highlighted
"""
self.problem_page.click_show()
self.assertTrue(self.problem_page.is_solution_tag_present())
self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3]))
self.problem_page.wait_for_show_answer_notification()
class CheckboxProblemTypeTestNonRandomized(CheckboxProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for the non-randomized checkbox problem
"""
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class CheckboxProblemTypeNeverShowCorrectnessTest(CheckboxProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Checkbox Problem Type problems.
"""
pass
@ddt.ddt
class MultipleChoiceProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization Multiple Choice Problem Type
"""
problem_name = 'MULTIPLE CHOICE TEST PROBLEM'
problem_type = 'multiple choice'
factory = MultipleChoiceResponseXMLFactory()
partially_correct = False
factory_kwargs = {
'question_text': 'The correct answer is Choice 2',
'choices': [False, False, True, False],
'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3'],
}
status_indicators = {
'correct': ['label.choicegroup_correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
'submitted': ['label.choicegroup_submitted', 'span.submitted'],
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def answer_problem(self, correctness):
"""
Answer multiple choice problem.
"""
if correctness == 'incorrect':
self.problem_page.click_choice("choice_choice_1")
else:
self.problem_page.click_choice("choice_choice_2")
@ddt.ddt
class MultipleChoiceProblemTypeTest(MultipleChoiceProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Multiple Choice Problem Type
"""
shard = 24
def test_can_show_answer(self):
"""
Scenario: Verifies that show answer button is working as expected.
Given that I am on courseware page
And I can see a CAPA problem with show answer button
When I click "Show Answer" button
The correct answer is displayed with a single correctness indicator.
"""
# Click the correct answer, but don't submit yet. No correctness shows.
self.answer_problem('correct')
self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[3]))
# After submit, the answer should be marked as correct.
self.problem_page.click_submit()
self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[3], show_answer=False))
# Switch to an incorrect answer. This will hide the correctness indicator.
self.answer_problem('incorrect')
self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[3]))
# Now click Show Answer. A single correctness indicator should be shown.
self.problem_page.click_show()
self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[3]))
# Finally, make sure that clicking Show Answer moved focus to the correct place.
self.problem_page.wait_for_show_answer_notification()
@ddt.ddt
class MultipleChoiceProblemResetCorrectnessAfterChangingAnswerTest(MultipleChoiceProblemTypeBase):
"""
Tests for Multiple choice problem with changing answers
"""
shard = 24
@ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
@ddt.unpack
def test_mcq_score_after_answer_and_reset(self, correctness, score):
"""
Scenario: I can see my score on a multiple choice problem when I answer it and after I reset it
Given I am viewing a multiple choice problem
When I answer a multiple choice problem <correctness>
Then I should see a <score>
When I reset the problem
Then I should see a score of points possible: 0/1 point (ungraded)
"""
self.answer_problem(correctness)
self.problem_page.click_submit()
self.assertEqual(self.problem_page.problem_progress_graded_value, score)
self.problem_page.click_reset()
self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')
@ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
@ddt.unpack
def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
"""
Scenario: I can reset the correctness of a multiple choice problem after changing my answer
Given I am viewing a multiple choice problem
When I answer a multiple choice problem <initial_correctness>
Then my multiple choice answer is marked <initial_correctness>
And I reset the problem
Then my multiple choice answer is NOT marked <initial_correctness>
And my multiple choice answer is NOT marked <other_correctness>
"""
self.assertTrue(self.problem_status("unanswered"))
self.answer_problem(initial_correctness)
self.problem_page.click_submit()
self.assertTrue(self.problem_status(initial_correctness))
self.problem_page.click_reset()
self.assertFalse(self.problem_status(initial_correctness))
self.assertFalse(self.problem_status(other_correctness))
@ddt.ddt
class MultipleChoiceProblemTypeTestNonRandomized(MultipleChoiceProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized multiple choice problem
"""
shard = 24
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True, 'max_attempts': 3}
)
def test_non_randomized_multiple_choice_with_multiple_attempts(self):
"""
Scenario: I can answer a problem with multiple attempts correctly but cannot reset because randomization is off
Given I am viewing a randomization "never" "multiple choice" problem with "3" attempts with reset
Then I should see "You have used 0 of 3 attempts" somewhere in the page
When I answer a "multiple choice" problem "correctly"
Then The "Reset" button does not appear
"""
self.assertEqual(
self.problem_page.submission_feedback,
"You have used 0 of 3 attempts",
"All 3 attempts are not available"
)
self.answer_problem("correct")
self.problem_page.click_submit()
self.assertFalse(self.problem_page.is_reset_button_present())
class MultipleChoiceProblemTypeTestOneAttempt(MultipleChoiceProblemTypeBase):
"""
Test Multiple choice problem with single attempt
"""
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True, 'max_attempts': 1}
)
def test_answer_with_one_attempt_correctly(self):
"""
        Scenario: I can answer a problem with one attempt correctly and cannot reset
Given I am viewing a "multiple choice" problem with "1" attempt
When I answer a "multiple choice" problem "correctly"
Then The "Reset" button does not appear
"""
self.answer_problem("correct")
self.problem_page.click_submit()
self.assertFalse(self.problem_page.is_reset_button_present())
class MultipleChoiceProblemTypeTestMultipleAttempt(MultipleChoiceProblemTypeBase):
"""
Test Multiple choice problem with multiple attempts
"""
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'always', 'show_reset_button': True, 'max_attempts': 3}
)
def test_answer_with_multiple_attempt_correctly(self):
"""
Scenario: I can answer a problem with multiple attempts correctly and still reset the problem
Given I am viewing a "multiple choice" problem with "3" attempts
Then I should see "You have used 0 of 3 attempts" somewhere in the page
When I answer a "multiple choice" problem "correctly"
Then The "Reset" button does appear
"""
self.assertEqual(
self.problem_page.submission_feedback,
"You have used 0 of 3 attempts",
"All 3 attempts are not available"
)
self.answer_problem("correct")
self.problem_page.click_submit()
self.assertTrue(self.problem_page.is_reset_button_present())
def test_learner_can_see_attempts_left(self):
"""
Scenario: I can view how many attempts I have left on a problem
Given I am viewing a "multiple choice" problem with "3" attempts
Then I should see "You have used 0 of 3 attempts" somewhere in the page
When I answer a "multiple choice" problem "incorrectly"
And I reset the problem
Then I should see "You have used 1 of 3 attempts" somewhere in the page
When I answer a "multiple choice" problem "incorrectly"
And I reset the problem
Then I should see "You have used 2 of 3 attempts" somewhere in the page
And The "Submit" button does appear
When I answer a "multiple choice" problem "correctly"
Then The "Reset" button does not appear
"""
for attempts_used in range(3):
self.assertEqual(
self.problem_page.submission_feedback,
u"You have used {} of 3 attempts".format(str(attempts_used)),
"All 3 attempts are not available"
)
if attempts_used == 2:
self.assertTrue(self.problem_page.is_submit_disabled())
self.answer_problem("correct")
self.problem_page.click_submit()
self.assertFalse(self.problem_page.is_reset_button_present())
else:
self.answer_problem("incorrect")
self.problem_page.click_submit()
self.problem_page.click_reset()
class MultipleChoiceProblemTypeNeverShowCorrectnessTest(MultipleChoiceProblemTypeBase,
ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Multiple Choice Problem Type problems.
"""
pass
class RadioProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Radio Problem Type
"""
problem_name = 'RADIO TEST PROBLEM'
problem_type = 'radio'
partially_correct = False
factory = ChoiceResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 2',
'choice_type': 'radio',
'choices': [False, False, True, False],
'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
}
status_indicators = {
'correct': ['label.choicegroup_correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
'submitted': ['label.choicegroup_submitted', 'span.submitted'],
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def answer_problem(self, correctness):
"""
Answer radio problem.
"""
if correctness == 'correct':
self.problem_page.click_choice("choice_2")
else:
self.problem_page.click_choice("choice_1")
@ddt.ddt
class RadioProblemTypeTest(RadioProblemTypeBase, ProblemTypeTestMixin):
"""
    Standard tests for the Radio Problem Type
"""
shard = 24
@ddt.ddt
class RadioProblemResetCorrectnessAfterChangingAnswerTest(RadioProblemTypeBase):
"""
Tests for Radio problem with changing answers
"""
shard = 24
@ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
@ddt.unpack
def test_radio_score_after_answer_and_reset(self, correctness, score):
"""
Scenario: I can see my score on a radio problem when I answer it and after I reset it
Given I am viewing a radio problem
When I answer a radio problem <correctness>
Then I should see a <score>
When I reset the problem
Then I should see a score of points possible: 0/1 point (ungraded)
"""
self.answer_problem(correctness)
self.problem_page.click_submit()
self.assertEqual(self.problem_page.problem_progress_graded_value, score)
self.problem_page.click_reset()
self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')
@ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
@ddt.unpack
def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
"""
Scenario: I can reset the correctness of a radio problem after changing my answer
Given I am viewing a radio problem
When I answer a radio problem with <initial_correctness>
Then my radio answer is marked <initial_correctness>
And I reset the problem
Then my radio problem's answer is NOT marked <initial_correctness>
And my radio problem's answer is NOT marked <other_correctness>
"""
self.assertTrue(self.problem_status("unanswered"))
self.answer_problem(initial_correctness)
self.problem_page.click_submit()
self.assertTrue(self.problem_status(initial_correctness))
self.problem_page.click_reset()
self.assertFalse(self.problem_status(initial_correctness))
self.assertFalse(self.problem_status(other_correctness))
class RadioProblemTypeTestNonRandomized(RadioProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized radio problem
"""
shard = 24
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class RadioProblemTypeNeverShowCorrectnessTest(RadioProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Radio Problem Type problems.
"""
pass
class DropDownProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Drop Down Problem Type
"""
problem_name = 'DROP DOWN TEST PROBLEM'
problem_type = 'drop down'
partially_correct = False
factory = OptionResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Option 2',
'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'],
'correct_option': 'Option 2'
}
def answer_problem(self, correctness):
"""
Answer drop down problem.
"""
answer = 'Option 2' if correctness == 'correct' else 'Option 3'
selector_element = self.problem_page.q(
css='.problem .option-input select')
select_option_by_text(selector_element, answer)
@ddt.ddt
class DropdownProblemTypeTest(DropDownProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
"""
Standard tests for the Dropdown Problem Type
"""
shard = 24
@ddt.ddt
class DropDownProblemTypeTestNonRandomized(DropDownProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Dropdown problem
"""
shard = 24
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class DropDownProblemTypeNeverShowCorrectnessTest(DropDownProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Drop Down Problem Type problems.
"""
pass
class StringProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for String Problem Type
"""
problem_name = 'STRING TEST PROBLEM'
problem_type = 'string'
partially_correct = False
factory = StringResponseXMLFactory()
factory_kwargs = {
'question_text': 'The answer is "correct string"',
'case_sensitive': False,
'answer': 'correct string',
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
'submitted': ['span.submitted'],
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def answer_problem(self, correctness):
"""
Answer string problem.
"""
textvalue = 'correct string' if correctness == 'correct' else 'incorrect string'
self.problem_page.fill_answer(textvalue)
class StringProblemTypeTest(StringProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the String Problem Type
"""
shard = 24
class StringProblemTypeNeverShowCorrectnessTest(StringProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for String Problem Type problems.
"""
pass
class NumericalProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Numerical Problem Type
"""
problem_name = 'NUMERICAL TEST PROBLEM'
problem_type = 'numerical'
partially_correct = False
factory = NumericalResponseXMLFactory()
factory_kwargs = {
'question_text': 'The answer is pi + 1',
'answer': '4.14159',
'tolerance': '0.00001',
'math_display': True,
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
'submitted': ['div.submitted'],
'unsubmitted': ['div.unsubmitted']
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def answer_problem(self, correctness):
"""
Answer numerical problem.
"""
if correctness == 'correct':
textvalue = "pi + 1"
elif correctness == 'error':
textvalue = 'notNum'
else:
textvalue = str(random.randint(-2, 2))
self.problem_page.fill_answer(textvalue)
@ddt.ddt
class NumericalProblemTypeTest(NumericalProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
"""
Standard tests for the Numerical Problem Type
"""
shard = 12
def test_error_input_gentle_alert(self):
"""
Scenario: I can answer a problem with erroneous input and will see a gentle alert
Given a Numerical Problem type
I can input a string answer
Then I will see a Gentle alert notification
And focus will shift to that notification
And clicking on "Review" moves focus to the problem meta area
"""
# Make sure we're looking at the right problem
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Answer the problem with an erroneous input to cause a gentle alert
self.assertFalse(self.problem_page.is_gentle_alert_notification_visible())
self.answer_problem(correctness='error')
self.problem_page.click_submit()
self.problem_page.wait_for_gentle_alert_notification()
# Check that clicking on "Review" goes to the problem meta location
self.problem_page.click_review_in_notification(notification_type='gentle-alert')
self.problem_page.wait_for_focus_on_problem_meta()
@ddt.ddt
class NumericalProblemTypeTestNonRandomized(NumericalProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Numerical problem
"""
shard = 12
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class NumericalProblemTypeTestViewAnswer(NumericalProblemTypeBase):
"""
Test learner can view Numerical problem's answer
"""
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'showanswer': 'always'}
)
def test_learner_can_view_answer(self):
"""
Scenario: I can view the answer if the problem has it:
Given I am viewing a "numerical" that shows the answer "always"
When I press the button with the label "Show Answer"
And I should see "4.14159" somewhere in the page
"""
self.problem_page.click_show()
self.assertEqual(self.problem_page.answer, '4.14159')
class NumericalProblemTypeNeverShowCorrectnessTest(NumericalProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Numerical Problem Type problems.
"""
pass
@ddt.ddt
class FormulaProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Formula Problem Type
"""
problem_name = 'FORMULA TEST PROBLEM'
problem_type = 'formula'
partially_correct = False
factory = FormulaResponseXMLFactory()
factory_kwargs = {
'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]',
'sample_dict': {'x': (-100, 100), 'y': (-100, 100)},
'num_samples': 10,
'tolerance': 0.00001,
'math_display': True,
'answer': 'x^2+2*x+y',
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
'submitted': ['div.submitted'],
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def answer_problem(self, correctness):
"""
Answer formula problem.
"""
textvalue = "x^2+2*x+y" if correctness == 'correct' else 'x^2'
self.problem_page.fill_answer(textvalue)
@ddt.ddt
class FormulaProblemTypeTest(FormulaProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
"""
Standard tests for the Formula Problem Type
"""
shard = 24
class FormulaProblemTypeTestNonRandomized(FormulaProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Formula problem
"""
shard = 24
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class FormulaProblemTypeNeverShowCorrectnessTest(FormulaProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Formula Problem Type problems.
"""
pass
@ddt.ddt
class ScriptProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Script Problem Type
"""
problem_name = 'SCRIPT TEST PROBLEM'
problem_type = 'script'
problem_points = 2
partially_correct = False
factory = CustomResponseXMLFactory()
factory_kwargs = {
'cfn': 'test_add_to_ten',
'expect': '10',
'num_inputs': 2,
'question_text': 'Enter two integers that sum to 10.',
'input_element_label': 'Enter an integer',
'script': textwrap.dedent("""
def test_add_to_ten(expect,ans):
try:
a1=int(ans[0])
a2=int(ans[1])
except ValueError:
a1=0
a2=0
return (a1+a2)==int(expect)
"""),
}
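
    # For reference (not in the original file): the embedded grader above
    # coerces unparsable input to zero, so, for example:
    #     test_add_to_ten('10', ['4', '6'])  ->  True   (4 + 6 == 10)
    #     test_add_to_ten('10', ['4', 'x'])  ->  False  (falls back to 0 + 0)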
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
'submitted': ['div.submitted'],
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
            True: If provided status is present on the page
            False: If provided status is not present on the page
        """
        selector = ', '.join(self.status_indicators[status])
        try:
            self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def answer_problem(self, correctness):
"""
Answer script problem.
"""
# Correct answer is any two integers that sum to 10
first_addend = random.randint(-100, 100)
second_addend = 10 - first_addend
# If we want an incorrect answer, then change
# the second addend so they no longer sum to 10
if not correctness == 'correct':
second_addend += random.randint(1, 10)
self.problem_page.fill_answer(first_addend, input_num=0)
self.problem_page.fill_answer(second_addend, input_num=1)
@ddt.ddt
class ScriptProblemTypeTest(ScriptProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Script Problem Type
"""
shard = 24
@ddt.ddt
class ScriptProblemResetAfterAnswerTest(ScriptProblemTypeBase):
"""
Test Script problem by resetting answers
"""
shard = 24
@ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
@ddt.unpack
def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
"""
Scenario: I can reset the correctness of a problem after changing my answer
Given I am viewing a script problem
Then my script problem's answer is marked "unanswered"
        When I answer and submit a script problem with <initial correctness>
        Then my script problem's answer is marked with <initial correctness>
        And I input an answer with <other correctness>
        Then my script problem's answer is marked "unanswered"
"""
self.assertTrue(self.problem_status('unanswered'))
self.answer_problem(initial_correctness)
self.problem_page.click_submit()
self.assertTrue(self.problem_status(initial_correctness))
self.answer_problem(other_correctness)
self.assertTrue(self.problem_status('unanswered'))
@ddt.data(['correct', '2/2 points (ungraded)'], ['incorrect', '0/2 points (ungraded)'])
@ddt.unpack
def test_script_score_after_answer_and_reset(self, correctness, score):
"""
Scenario: I can see my score on a script problem when I answer it and after I reset it
Given I am viewing a script problem
When I answer a script problem correct/incorrect
Then I should see a score
When I reset the problem
Then I should see a score of points possible: 0/2 points (ungraded)
"""
self.answer_problem(correctness)
self.problem_page.click_submit()
self.assertEqual(self.problem_page.problem_progress_graded_value, score)
self.problem_page.click_reset()
self.assertEqual(self.problem_page.problem_progress_graded_value, '0/2 points (ungraded)')
class ScriptProblemTypeTestNonRandomized(ScriptProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Script problem
"""
shard = 24
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class ScriptProblemTypeNeverShowCorrectnessTest(ScriptProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Script Problem Type problems.
"""
pass
class JSInputTypeTest(ProblemTypeTestBase, ProblemTypeA11yTestMixin):
"""
TestCase Class for jsinput (custom JavaScript) problem type.
Right now the only test point that is executed is the a11y test.
This is because the factory simply creates an empty iframe.
"""
problem_name = 'JSINPUT PROBLEM'
problem_type = 'customresponse'
factory = JSInputXMLFactory()
factory_kwargs = {
'question_text': 'IFrame shows below (but has no content)'
}
def answer_problem(self, correctness):
"""
Problem is not set up to work (displays an empty iframe), but this method must
be extended because the parent class has marked it as abstract.
"""
raise NotImplementedError()
class CodeProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Code Problem Type
"""
problem_name = 'CODE TEST PROBLEM'
problem_type = 'code'
partially_correct = False
can_update_save_notification = False
factory = CodeResponseXMLFactory()
factory_kwargs = {
'question_text': 'Submit code to an external grader',
'initial_display': 'print "Hello world!"',
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}',
}
status_indicators = {
'correct': ['.grader-status .correct ~ .debug'],
'incorrect': ['.grader-status .incorrect ~ .debug'],
'unanswered': ['.grader-status .unanswered ~ .debug'],
'submitted': ['.grader-status .submitted ~ .debug'],
}
def answer_problem(self, correctness):
"""
Answer code problem.
"""
# The fake xqueue server is configured to respond
# correct / incorrect no matter what we submit.
# Furthermore, since the inline code response uses
# JavaScript to make the code display nicely, it's difficult
        # to programmatically input text
        # (there's no <textarea> we can just fill text into).
# For this reason, we submit the initial code in the response
# (configured in the problem XML above)
pass
class CodeProblemTypeTest(CodeProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Code Problem Type
"""
shard = 12
def test_answer_incorrectly(self):
"""
        Overridden for the code problem test because the testing grader always responds
with "correct"
"""
pass
def test_submit_blank_answer(self):
"""
        Overridden for the code problem test because the testing grader always responds
with "correct"
"""
pass
def test_cant_submit_blank_answer(self):
"""
        Overridden for the code problem test because the testing grader always responds
with "correct"
"""
pass
def wait_for_status(self, status):
"""
        Overridden for the code problem test because the testing grader always responds
with "correct"
"""
pass
class CodeProblemTypeNeverShowCorrectnessTest(CodeProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Code Problem Type problems.
"""
pass
class ChoiceTextProblemTypeTestBase(ProblemTypeTestBase):
"""
Base class for "Choice + Text" Problem Types.
(e.g. RadioText, CheckboxText)
"""
choice_type = None
partially_correct = False
can_update_save_notification = False
def _select_choice(self, input_num):
"""
Selects the nth (where n == input_num) choice of the problem.
"""
self.problem_page.q(
css=u'div.problem input.ctinput[type="{}"]'.format(self.choice_type)
).nth(input_num).click()
def _fill_input_text(self, value, input_num):
"""
Fills the nth (where n == input_num) text input field of the problem
with value.
"""
self.problem_page.q(
css='div.problem input.ctinput[type="text"]'
).nth(input_num).fill(value)
def answer_problem(self, correctness):
"""
        Answer the choice + text problem.
"""
choice = 0 if correctness == 'correct' else 1
input_value = "8" if correctness == 'correct' else "5"
self._select_choice(choice)
self._fill_input_text(input_value, choice)
class RadioTextProblemTypeBase(ChoiceTextProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Radio Text Problem Type
"""
problem_name = 'RADIO TEXT TEST PROBLEM'
problem_type = 'radio_text'
choice_type = 'radio'
partially_correct = False
can_update_save_notification = False
factory = ChoiceTextResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'radiotextgroup',
'choices': [
("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"}),
],
}
status_indicators = {
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
'submitted': ['section.choicetextgroup_submitted', 'span.submitted'],
}
def problem_status(self, status):
"""
Returns the status of problem
Args:
status(string): status of the problem which is to be checked
Returns:
True: If provided status is present on the page
False: If provided status is not present on the page
"""
selector = ', '.join(self.status_indicators[status])
try:
self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
return True
except BrokenPromise:
return False
def setUp(self, *args, **kwargs):
"""
Additional setup for RadioTextProblemTypeBase
"""
super(RadioTextProblemTypeBase, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
'radiogroup', # TODO: AC-491
'label', # TODO: AC-491
'section', # TODO: AC-491
]
})
@ddt.ddt
class RadioTextProblemTypeTest(RadioTextProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Radio Text Problem Type
"""
shard = 24
@ddt.ddt
class RadioTextProblemResetCorrectnessAfterChangingAnswerTest(RadioTextProblemTypeBase):
"""
Tests for Radio Text problem with changing answers
"""
shard = 24
@ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
@ddt.unpack
    def test_radio_text_score_after_answer_and_reset(self, correctness, score):
"""
Scenario: I can see my score on a radio text problem when I answer it and after I reset it
Given I am viewing a radio text problem
When I answer a radio text problem correct/incorrect
Then I should see a score
When I reset the problem
        Then I should see a score of points possible: 0/1 point (ungraded)
"""
self.answer_problem(correctness)
self.problem_page.click_submit()
self.assertEqual(self.problem_page.problem_progress_graded_value, score)
self.problem_page.click_reset()
self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')
@ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
@ddt.unpack
def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
"""
        Scenario: I can reset the correctness of a radio text problem after changing my answer
Given I am viewing a radio text problem
When I answer a radio text problem InitialCorrectness
Then my radio text answer is marked InitialCorrectness
And I reset the problem
Then my answer is NOT marked InitialCorrectness
And my answer is NOT marked OtherCorrectness
"""
self.assertTrue(self.problem_status("unanswered"))
self.answer_problem(initial_correctness)
self.problem_page.click_submit()
self.assertTrue(self.problem_status(initial_correctness))
self.problem_page.click_reset()
self.assertFalse(self.problem_status(initial_correctness))
self.assertFalse(self.problem_status(other_correctness))
class RadioTextProblemTypeTestNonRandomized(RadioTextProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Radio text problem
"""
shard = 24
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class RadioTextProblemTypeNeverShowCorrectnessTest(RadioTextProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Radio + Text Problem Type problems.
"""
pass
class CheckboxTextProblemTypeBase(ChoiceTextProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Checkbox Text Problem Type
"""
problem_name = 'CHECKBOX TEXT TEST PROBLEM'
problem_type = 'checkbox_text'
choice_type = 'checkbox'
factory = ChoiceTextResponseXMLFactory()
partially_correct = False
can_update_save_notification = False
factory_kwargs = {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'checkboxtextgroup',
'choices': [
("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"}),
],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for CheckboxTextProblemTypeBase
"""
super(CheckboxTextProblemTypeBase, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
'checkboxgroup', # TODO: AC-491
'label', # TODO: AC-491
'section', # TODO: AC-491
]
})
class CheckboxTextProblemTypeTest(CheckboxTextProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Checkbox Text Problem Type
"""
pass
class CheckboxTextProblemTypeTestNonRandomized(CheckboxTextProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Checkbox problem
"""
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class CheckboxTextProblemTypeNeverShowCorrectnessTest(CheckboxTextProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Checkbox + Text Problem Type problems.
"""
pass
class ImageProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Image Problem Type
"""
problem_name = 'IMAGE TEST PROBLEM'
problem_type = 'image'
partially_correct = False
factory = ImageResponseXMLFactory()
can_submit_blank = True
can_update_save_notification = False
factory_kwargs = {
'src': '/static/images/placeholder-image.png',
'rectangle': '(0,0)-(50,50)',
}
def answer_problem(self, correctness):
"""
Answer image problem.
"""
offset = 25 if correctness == 'correct' else -25
input_selector = ".imageinput [id^='imageinput_'] img"
input_element = self.problem_page.q(css=input_selector)[0]
chain = ActionChains(self.browser)
chain.move_to_element(input_element)
chain.move_by_offset(offset, offset)
chain.click()
chain.perform()
@ddt.ddt
class ImageProblemTypeTest(ImageProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Image Problem Type
"""
def test_image_problem_score_with_blank_answer(self):
"""
Scenario: I can see my score on a problem to which I submit a blank answer
        Given I am viewing an image problem
When I submit a problem
Then I should see a score of Points Possible: 0/1 point (ungraded)
"""
self.problem_page.click_submit()
self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')
class ImageProblemTypeTestNonRandomized(ImageProblemTypeBase, NonRandomizedProblemTypeTestMixin):
"""
Tests for non-randomized Image problem
"""
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'never', 'show_reset_button': True}
)
class ImageProblemTypeNeverShowCorrectnessTest(ImageProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Image Problem Type problems.
"""
pass
class SymbolicProblemTypeBase(ProblemTypeTestBase):
"""
ProblemTypeTestBase specialization for Symbolic Problem Type
"""
problem_name = 'SYMBOLIC TEST PROBLEM'
problem_type = 'symbolicresponse'
partially_correct = False
factory = SymbolicResponseXMLFactory()
factory_kwargs = {
'expect': '2*x+3*y',
'question_text': 'Enter a value'
}
status_indicators = {
'correct': ['div.capa_inputtype div.correct'],
'incorrect': ['div.capa_inputtype div.incorrect'],
'unanswered': ['div.capa_inputtype div.unanswered'],
'submitted': ['div.capa_inputtype div.submitted'],
}
def answer_problem(self, correctness):
"""
Answer symbolic problem.
"""
choice = "2*x+3*y" if correctness == 'correct' else "3*a+4*b"
self.problem_page.fill_answer(choice)
class SymbolicProblemTypeTest(SymbolicProblemTypeBase, ProblemTypeTestMixin):
"""
Standard tests for the Symbolic Problem Type
"""
pass
class SymbolicProblemTypeNeverShowCorrectnessTest(SymbolicProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
Ensure that correctness can be withheld for Symbolic Problem Type problems.
"""
pass
| agpl-3.0 | 4,416,508,447,962,702,000 | 33.496201 | 119 | 0.638412 | false |
energicryptocurrency/energi | qa/rpc-tests/mempool_packages.py | 1 | 11060 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test descendant package tracking code
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5", "-debug"]))
connect_nodes(self.nodes[0], 1)
self.is_network_split = False
self.sync_all()
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for i in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert(chain[-1] not in v_ancestors.keys())
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert(chain[0] not in v_descendants.keys())
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(chain[0], 0, 1000)
mempool = self.nodes[0].getrawmempool(True)
ancestor_fees = 0
for x in chain:
ancestor_fees += mempool[x]['fee']
assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(chain[0], 0, -1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(chain[-1], 0, 1000)
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
try:
self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
except JSONRPCException as e:
print("too-long-ancestor-chain successfully rejected")
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.nodes[0].generate(1)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(chain[-1], 0, 2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
for i in range(MAX_DESCENDANTS):
utxo = transaction_package.pop(0)
try:
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
if i == MAX_DESCENDANTS - 2:
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
except JSONRPCException as e:
print(e.error['message'])
assert_equal(i, MAX_DESCENDANTS - 1)
print("tx that would create too large descendant package successfully rejected")
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
(tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
| mit | 7,192,508,531,699,877,000 | 41.868217 | 127 | 0.61519 | false |
diegocortassa/TACTIC | src/tactic/ui/panel/custom_layout_wdg.py | 1 | 51769 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from __future__ import print_function
__all__ = ["CustomLayoutWdg", "SObjectHeaderWdg"]
import os, types, re
import cStringIO
from pyasm.common import Xml, XmlException, Common, TacticException, Environment, Container, jsonloads, jsondumps
from pyasm.biz import Schema, ExpressionParser, Project
from pyasm.search import Search, SearchKey, WidgetDbConfig, SObject
from pyasm.web import DivWdg, SpanWdg, HtmlElement, Table, Widget, Html, WebContainer
from pyasm.widget import WidgetConfig, WidgetConfigView, IconWdg
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.container import SmartMenu
from tactic.ui.container import Menu, MenuItem, SmartMenu
from tactic_client_lib import TacticServerStub
class CustomLayoutWdg(BaseRefreshWdg):
ARGS_KEYS = {
'search_key': 'search key of the sobject to be displayed',
        # or search_type (defined with full metadata below) + code
'code': 'code of the sobject to be displayed',
'id': {
'description': 'id of the sobject to be displayed',
'category': '_internal',
},
'sobjects_expr': 'expression to populate the sobjects for this widget',
'category': {
'description': 'category of the config to search for',
},
'search_type': {
'description': 'search type of the sobject to be displayed',
'type': 'TextWdg',
'order': 1,
},
'view': {
'description': 'The view defined in the widget config/Custom Layout Editor that contains the custom html',
'type': 'TextWdg',
'order': 2,
'category': 'Options'
},
'state': {
'description': 'State surrounding the widget',
'category': '_deprecated'
},
'html': {
'description': 'Explicitly define the html layout inline',
'type': 'TextAreaWdg',
},
'include_mako': {
'description': 'Boolean to turn on mako',
'type': 'SelectWdg',
'values': 'false|true',
},
'include': {
'description': 'Include any other config files',
'type': 'TextWdg',
#'order': '1',
}
}
def __init__(self, **kwargs):
super(CustomLayoutWdg, self).__init__(**kwargs)
def init(self):
self.server = TacticServerStub.get(protocol='local')
sobjects_expr = self.kwargs.get("sobjects_expr")
if sobjects_expr:
self.sobjects = Search.eval(sobjects_expr)
self.data = {}
# NOTE: this is is for the FilterElement Functionality
self.show_title = True
self.layout_wdg = None
self.config = None
self.def_config = None
self.sobject_dicts = None
self.is_table_element = False
self.sequence_data = []
def preprocess(self):
code = self.kwargs.get('data')
if not code:
self.data = {}
return
# preprocess using mako
#include_mako = self.kwargs.get("include_mako")
#if not include_mako:
# include_mako = self.view_attrs.get("include_mako")
from tactic.command import PythonCmd
python_cmd = PythonCmd(code=code)
self.data = python_cmd.execute()
# NOTE: this is so that a custom layout can be used as a filter ....
# however, this is not ideal because a filter requires a number of
# methods that should not be present in this class
def alter_search(self, search):
script_path = self.get_option("alter_search_script_path")
script_code = self.get_option("alter_search_script_code")
from tactic.command import PythonCmd
if script_path:
cmd = PythonCmd(script_path=script_path, values=self.values, search=search, show_title=self.show_title)
elif script_code:
cmd = PythonCmd(script_code=script_code, values=self.values, search=search, show_title=self.show_title)
cmd.execute()
def set_values(self, values):
self.values = values
def set_show_title(self, flag):
self.show_title = flag
def get_display(self):
self.sobject = self.get_current_sobject()
if not self.sobject:
self.sobject = self.get_sobject_from_kwargs()
if self.sobject and self.sobject.is_insert():
return DivWdg()
if self.sobject:
self.search_key = SearchKey.get_by_sobject(self.sobject)
self.kwargs['search_key'] = self.search_key
else:
self.search_key = self.kwargs.get('search_key')
html = self.kwargs.get('html')
if not html:
html = ""
# DEPRECATED
self.state = self.kwargs.get("state")
self.state = BaseRefreshWdg.process_state(self.state)
if not self.state:
self.state = self.kwargs
self.state['search_key'] = self.search_key
self.view = self.kwargs.get('view')
        if self.view:
            self.view = self.view.replace("/", ".")
        self.view_folder = ""
        if self.view and self.view.startswith("."):
self.view_folder = self.kwargs.get("__view_folder__")
if self.view_folder:
self.view = "%s%s" % (self.view_folder, self.view)
parts = self.view.split(".")
self.view_folder = ".".join(parts[:-1])
if not self.view and not html:
raise TacticException("No view defined in custom layout")
# If html is not a string, then convert it?
if not isinstance(html, basestring):
html = str(html)
self.view_attrs = {}
self.category = self.kwargs.get("category")
self.search_type = self.kwargs.get("search_type")
self.encoding = self.kwargs.get("encoding")
if not self.encoding:
self.encoding = 'utf-8'
self.plugin = None
xml = None
# if html is not provided, then get it from the config
config = None
if not html:
if self.config != None:
config = self.config
else:
config = self.kwargs.get("config")
if not config:
config = self.get_config()
if not config:
#div = DivWdg()
#div.add("No config defined for view [%s] for custom layout" % self.view)
#return div
raise TacticException("No config defined for view [%s] for custom layout" % self.view)
if isinstance(config, WidgetDbConfig):
config_str = config.get_value("config")
else:
config_str = ''
if config_str.startswith("<html>"):
html = config_str
self.def_config = None
else:
xml = config.get_xml()
if self.def_config == None:
self.def_config = self.get_def_config(xml)
# get the view attributes
if isinstance(config, WidgetConfigView):
top_config = config.get_configs()[0]
else:
top_config = config
view_node = top_config.get_view_node()
if view_node is None:
div = DivWdg("No view node found in xml. Invalid XML entry found")
return div
self.view_attrs = xml.get_attributes(view_node)
nodes = xml.get_nodes("config/%s/html/*" % self.view)
if not nodes:
div = DivWdg("No definition found")
return div
# convert html tag to a div
html = cStringIO.StringIO()
for node in nodes:
# unfortunately, html does not recognize <textarea/>
# so we have to make sure it becomes <textarea></textarea>
text = xml.to_string(node)
text = text.encode('utf-8')
keys = ['textarea','input']
for key in keys:
p = re.compile("(<%s.*?/>)" % key)
m = p.search(text)
if m:
for group in m.groups():
xx = group.replace("/", "")
xx = "%s</%s>" % (xx, key)
text = text.replace(group, xx)
text = text.replace("<%s/>" % key, "<%s></%s>" % (key, key))
# add linebreaks to element tag
key = 'element'
# reg full tag <element><display...></element>
p = re.compile(r"(<%s\b[^>]*>(?:.*?)</%s>)" % (key, key))
# short-hand tag <element/>
p1 = re.compile("(</%s>|<%s.*?/>)" %(key, key))
m = p.search(text)
m1 = p1.search(text)
if m:
for group in m.groups():
if group:
text = text.replace(group, '\n%s\n'%group)
if m1:
for group in m1.groups():
if group:
text = text.replace(group, '\n%s\n'%group)
html.write(text)
html = html.getvalue()
self.config = config
        #self.def_config = config # This is unnecessary?
# try to get the sobject if this is in a table element widget
if self.search_key:
try:
# this will raise an exception if it is not in a table element
sobject = self.get_current_sobject()
except:
sobject = SearchKey.get_by_search_key(self.search_key)
sobjects = [sobject]
else:
try:
# this will raise an exception if it is not in a table element
sobject = self.get_current_sobject()
if sobject:
sobjects = [sobject]
else:
sobjects = []
except:
sobject = self.sobjects
self.layout = self.get_layout_wdg()
# preprocess using mako
include_mako = self.kwargs.get("include_mako")
if not include_mako:
include_mako = self.view_attrs.get("include_mako")
if xml:
mako_node = xml.get_node("config/%s/mako" % self.view)
if mako_node is not None:
mako_str = xml.get_node_value(mako_node)
html = "<%%\n%s\n%%>\n%s" % (mako_str, html)
from pyasm.web import Palette
num_palettes = Palette.num_palettes()
#if include_mako in ['true', True]:
if include_mako not in ['false', False]:
html = html.replace("<", "<")
html = html.replace(">", ">")
html = self.process_mako(html)
# preparse out expressions
# use relative expressions - [expr]xxx[/expr]
p = re.compile('\[expr\](.*?)\[\/expr\]')
parser = ExpressionParser()
matches = p.finditer(html)
for m in matches:
full_expr = m.group()
expr = m.groups()[0]
result = parser.eval(expr, sobjects, single=True, state=self.state)
if isinstance(result, basestring):
result = Common.process_unicode_string(result)
else:
result = str(result)
html = html.replace(full_expr, result )
        # use absolute expressions - [abs_expr]xxx[/abs_expr]
p = re.compile('\[abs_expr\](.*?)\[\/abs_expr\]')
parser = ExpressionParser()
matches = p.finditer(html)
for m in matches:
full_expr = m.group()
expr = m.groups()[0]
result = parser.eval(expr, single=True)
if isinstance(result, basestring):
result = Common.process_unicode_string(result)
else:
result = str(result)
html = html.replace(full_expr, result )
# need a top widget that can be used to refresh
top = self.top
self.set_as_panel(top)
top.add_class("spt_custom_top")
top.add_class("spt_panel")
ignore_events = self.kwargs.get("ignore_events") in ['true', True]
if ignore_events:
top.add_style("pointer-events: none")
# create the content div
content = DivWdg()
content.add_class("spt_custom_content")
content.add_style("position: relative")
if ignore_events:
content.add_style("pointer-events: none")
top.add(content)
self.content = content
is_test = Container.get("CustomLayout::is_test")
if not is_test:
is_test = self.kwargs.get("is_test") in [True, 'true']
if is_test:
Container.put("CustomLayout::is_test", True)
self.top.add_style("margin: 0px 5px")
self.handle_is_test(content)
html = self.replace_elements(html)
content.add(html)
if xml:
self.add_behaviors(content, xml)
# remove all the extra palettes created
while True:
extra_palettes = Palette.num_palettes() - num_palettes
if extra_palettes > 0:
Palette.pop_palette()
else:
break
if self.kwargs.get("is_top") in ['true', True]:
return html
elif self.kwargs.get("is_refresh"):
return content
else:
return top
def handle_is_test(self, content):
content.add_behavior( {
'type': 'mouseover',
'cbjs_action': '''
//bvr.src_el.setStyle("border", "solid 1px blue");
bvr.src_el.setStyle("box-shadow", "0px 0px 5px rgba(0, 0, 0, 0.5)");
//bvr.src_el.setStyle("margin", "-1px");
var els = bvr.src_el.getElements(".spt_test");
for (var i = 0; i < els.length; i++) {
els[i].setStyle("display", "");
break;
}
'''
} )
content.add_behavior( {
'type': 'mouseleave',
'cbjs_action': '''
bvr.src_el.setStyle("box-shadow", "");
//bvr.src_el.setStyle("margin", "0px");
var els = bvr.src_el.getElements(".spt_test");
for (var i = 0; i < els.length; i++) {
els[i].setStyle("display", "none");
break;
}
'''
} )
div = DivWdg()
content.add(div)
div.add_style("position: absolute")
div.add("View: %s" % self.view)
div.add_class("spt_test")
div.add_border()
#div.set_box_shadow("1px 1px 1px 1px")
div.add_style("display: none")
div.add_style("padding: 3px")
div.add_style("margin-left: 3px")
div.add_style("left: 0px")
div.add_style("top: -15px")
#div.add_style("opacity: 0.5")
div.add_style("inherit: false")
div.add_style("z-index: 1000")
div.add_style("background-color: white")
div.add_class("hand")
div.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_custom_top");
top.setAttribute("spt_is_test", "true");
var size = top.getSize();
top.innerHTML = "<div style='width: "+size.x+";height: "+size.y+";padding: 10px; font-weight: bold'>Loading ...</div>";
spt.panel.refresh(top);
'''
} )
# add in a context menu
menu = self.get_test_context_menu()
menus = [menu.get_data()]
menus_in = {
'TEST_CTX': menus,
}
SmartMenu.attach_smart_context_menu( div, menus_in, False )
SmartMenu.assign_as_local_activator( div, 'TEST_CTX' )
def get_test_context_menu(self):
menu = Menu(width=180)
menu.set_allow_icons(False)
menu_item = MenuItem(type='title', label='Actions')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Refresh')
menu.add(menu_item)
menu_item.add_behavior( {
'type': 'click_up',
'view': self.view,
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_custom_top");
top.setAttribute("spt_is_test", "true");
var size = top.getSize();
top.innerHTML = "<div style='width: "+size.x+";height: "+size.y+";padding: 10px; font-weight: bold'>Loading ...</div>";
spt.panel.refresh(top);
'''
} )
menu_item = MenuItem(type='action', label='Edit')
menu.add(menu_item)
menu_item.add_behavior( {
'type': 'click_up',
'view': self.view,
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var popup_top = activator.getParent(".spt_popup");
var top = popup_top.top;
if (top) {
top.setAttribute("spt_view", bvr.view);
spt.app_busy.show("Loading view: " + bvr.view);
spt.panel.refresh(top);
spt.app_busy.hide();
}
'''
} )
menu_item = MenuItem(type='action', label='Open in Main Tab')
menu.add(menu_item)
menu_item.add_behavior( {
'type': 'click_up',
'view': self.view,
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var popup_top = activator.getParent(".spt_popup");
spt.popup.close(popup_top);
var top = activator.getParent(".spt_custom_top");
var class_name = top.getAttribute("spt_class_name");
var kwargs = spt.panel.get_element_options(top);
//kwargs['is_test'] = true;
var title = "Test: " + bvr.view;
spt.tab.set_main_body_tab();
spt.tab.add_new(title, title, class_name, kwargs);
'''
} )
return menu
HEADER = '''<%def name='expr(expr)'><% result = server.eval(expr) %>${result}</%def>'''
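    # The HEADER above registers a small mako helper so a layout's HTML can
    # embed TACTIC expressions inline. A hypothetical template snippet (not
    # taken from any shipped view) would look like:
    #
    #     <div>Open tasks: ${expr("@COUNT(sthpw/task)")}</div>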
def process_mako(self, html):
from mako.template import Template
from mako import exceptions
html = '%s%s' % (CustomLayoutWdg.HEADER, html)
# remove CDATA tags
html = html.replace("<![CDATA[", "")
html = html.replace("]]>", "")
#html = html.decode('utf-8')
if self.encoding == 'ascii':
template = Template(html)
else:
template = Template(html, output_encoding=self.encoding, input_encoding=self.encoding)
# get the api version of the sobject
if not self.is_table_element:
if self.sobject_dicts == None:
self.sobject_dicts = []
for sobject in self.sobjects:
sobject_dict = sobject.get_sobject_dict()
self.sobject_dicts.append(sobject_dict)
if self.sobject:
sobject = self.sobject.get_sobject_dict()
else:
sobject = {}
# find out if there is a plugin associated with this
plugin = self.kwargs.get("plugin")
if not plugin or plugin == '{}':
plugin = {}
"""
if not plugin and isinstance(self.config, SObject):
plugin = Search.eval("@SOBJECT(config/plugin_content.config/plugin)", self.config, single=True)
"""
if plugin:
if isinstance(plugin, dict):
pass
else:
plugin = plugin.get_sobject_dict()
plugin_code = plugin.get("code")
plugin_dir = self.server.get_plugin_dir(plugin)
else:
plugin_code = ""
plugin_dir = ""
plugin = {}
self.kwargs['plugin_dir'] = plugin_dir
self.kwargs['plugin_code'] = plugin_code
try:
html = template.render(server=self.server, search=Search, sobject=sobject, sobjects=self.sobject_dicts, data=self.data, plugin=plugin, kwargs=self.kwargs)
# we have to replace all & signs to & for it be proper html
html = html.replace("&", "&")
return html
except Exception as e:
if str(e) == """'str' object has no attribute 'caller_stack'""":
raise TacticException("Mako variable 'context' has been redefined. Please use another variable name")
else:
print("Error in view [%s]: " % self.view, exceptions.text_error_template().render())
#html = exceptions.html_error_template().render(css=False)
html = exceptions.html_error_template().render()
html = html.replace("body { font-family:verdana; margin:10px 30px 10px 30px;}", "")
return html
def handle_layout_behaviors(self, layout):
'''required for BaseTableElementWdg used by fast table'''
pass
def add_test(self, xml):
# add a test env in
text_node = xml.get_nodes("config/%s/test" % self.view)
def add_kwargs(self, widget, xml):
"""
ARGS_KEYS = {
'category': {
'description': 'category of the config to search for',
},
'view': {
'description': 'The view defined in the widget config/Custom Layout Editor that contains the custom html',
'type': 'TextWdg',
'order': 2,
'category': 'Options'
},
}
<kwargs>
<kwarg name="category">
<description>category of the config to search for</description>
</kwarg>
<kwarg name="view">
<description>The view defined in the widget config/Custom Layout Editor that contains the custom html</description>
<type>TextWdg</type>
<order>2</order>
<category>Options</category>
</kwarg>
</kwargs>
"""
kwargs_nodes = xml.get_nodes("config/%s/kwargs/kwarg" % self.view)
        for kwarg_node in kwargs_nodes:
pass
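        # A minimal sketch (hypothetical; this stub does not implement it, and
        # the exact Xml helper names may differ) of how each <kwarg> node
        # documented above could be folded back into an ARGS_KEYS-style dict:
        #
        #     args_keys = {}
        #     for kwarg_node in kwargs_nodes:
        #         name = Xml.get_attribute(kwarg_node, "name")
        #         entry = {}
        #         for child in Xml.get_children(kwarg_node):
        #             entry[Xml.get_node_name(child)] = Xml.get_node_value(child)
        #         args_keys[name] = entry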
def add_behaviors(self, widget, xml):
behavior_nodes = xml.get_nodes("config/%s/behavior" % self.view)
if behavior_nodes:
hidden_div = DivWdg()
hidden_div.add_styles("display: none");
hidden_div.add_class("spt_customlayoutwdg_handoffs")
widget.add( hidden_div )
widget.add_behavior({
'type': 'load',
'cbjs_action': '''
// handle embedded load behaviors!
var el_load_list = bvr.src_el.getElements(".SPT_BVR_LOAD_PENDING");
spt.behavior.process_load_behaviors( el_load_list );
'''
})
# remove objects that cannot be json marshalled
view_kwargs = self.kwargs.copy()
for key, value in view_kwargs.items():
try:
test = jsondumps(value)
except Exception as e:
del(view_kwargs[key])
for behavior_node in behavior_nodes:
bvr_div = DivWdg()
hidden_div.add( bvr_div )
css_class = Xml.get_attribute(behavior_node, 'class')
behavior_str = Xml.get_node_value(behavior_node)
behavior_str = behavior_str.strip()
# if the event is specified in the xml, then use that
event = Xml.get_attribute(behavior_node, 'event')
modkeys = Xml.get_attribute(behavior_node, 'modkeys')
relay_class = Xml.get_attribute(behavior_node, 'relay_class')
if not behavior_str:
continue
try:
try:
bvr = eval(behavior_str)
except:
# try it as a string
bvr_str = eval("'''\n%s\n'''" % behavior_str)
if bvr_str:
bvr = {}
bvr['cbjs_action'] = bvr_str
if event:
bvr['type'] = event
if modkeys:
bvr['modkeys'] = modkeys
# add the kwargs to this so behaviors have access
bvr['kwargs'] = view_kwargs
bvr['class_name'] = Common.get_full_class_name(self)
if relay_class:
bvr['bvr_match_class'] = relay_class
if not bvr.get("type"):
bvr['type'] = 'mouseup'
self.content.add_relay_behavior( bvr )
elif bvr.get("type") == "smart_drag":
bvr['bvr_match_class'] = css_class
self.content.add_behavior(bvr)
elif bvr.get("type") == "listen":
bvr['bvr_match_class'] = css_class
bvr['event_name'] = Xml.get_attribute(behavior_node,'event_name')
self.content.add_behavior(bvr)
else:
bvr['_handoff_'] = '@.getParent(".spt_custom_content").getElements(".%s")' % css_class
if not bvr.get("type"):
bvr['type'] = 'click_up'
bvr_div.add_behavior( bvr )
except Exception as e:
print("Error: ", e)
raise TacticException("Error parsing behavior [%s]" % behavior_str)
def get_config(self):
config = None
config_xml = self.kwargs.get('config_xml')
if config_xml:
config = WidgetConfig.get(xml=config_xml, view=self.view)
return config
# this is the new preferred way of defining CustomLayoutWdg
search = Search("config/widget_config")
if self.category:
search.add_filter("category", self.category)
else:
search.add_filter("category", 'CustomLayoutWdg')
if self.search_type:
search.add_filter("search_type", self.search_type)
search.add_filter("view", self.view)
configs = search.get_sobjects()
        # annoyingly NULL always sorts higher than any number, so we have
        # to put them at the end
if configs and configs[0].column_exists("priority"):
configs = sorted(configs, key=lambda x: x.get("priority"))
configs.reverse()
if configs:
config = configs[0]
return config
# if it is not defined in the database, look at a config file
includes = self.kwargs.get("include")
if includes:
includes = includes.split("|")
for include in includes:
if include.find('/') != -1:
file_path = include
else:
tmp_path = __file__
dir_name = os.path.dirname(tmp_path)
file_path ="%s/../config/%s" % (dir_name, include)
config = WidgetConfig.get(file_path=file_path, view=self.view)
if config and config.has_view(self.view):
return config
# deprecated approach, assuming a "CustomLayoutWdg" as search_type,
# is deprecated
if not config:
search = Search("config/widget_config")
if self.category:
search.add_filter("category", self.category)
if self.search_type:
search.add_filter("search_type", "CustomLayoutWdg")
search.add_filter("view", self.view)
config = search.get_sobject()
#if not config and self.search_type and self.view:
# config = WidgetConfigView.get_by_search_type(self.search_type, self.view)
# this is the new preferred way of defining CustomLayoutWdg
# NOTE: this finds a definition where search type is not explicitly
        # given
if not config:
search = Search("config/widget_config")
search.add_filter("view", self.view)
search.add_filter("search_type", None)
config = search.get_sobject()
return config
def get_def_config(self, def_xml=None):
        def_config = None
self.def_view = self.kwargs.get('definition')
if self.def_view:
#raise TacticException("No definition view defined in custom layout with view [%s]" % self.view)
self.search_type = "CustomLayoutWdg"
search = Search("config/widget_config")
search.add_filter("search_type", self.search_type)
search.add_filter("view", self.def_view)
def_db_config = search.get_sobject()
if not def_db_config:
raise TacticException("Definition config [%s] not defined" % self.def_view)
def_xml = def_db_config.get_xml()
def_config = WidgetConfig.get("definition", xml=def_xml)
# also look inline to see if there are any definitions
if def_xml:
# just use the passed in xml for a definition
def_config = WidgetConfig.get(self.view, xml=def_xml)
return def_config
def replace_elements(self, html_str):
"""
# NOTE: this likely is a better way to extract elements, but still
# need to find a way to inject html back into the xml
xml = Xml()
xml.read_string("<div>%s</div>" % html_str)
elements = xml.get_nodes("//element")
for element in elements:
# create a complete config
full_line_str = xml.to_string(element)
tmp_config = '''<config><tmp>%s</tmp></config>''' % full_line_str
try:
element_wdg = self.get_element_wdg(xml, self.def_config)
element_html = element_wdg.get_buffer_display()
except Exception as e:
from pyasm.widget import ExceptionWdg
element_html = ExceptionWdg(e).get_buffer_display()
xml = Xml()
try:
xml.read_string(element_html)
except Exception as e:
print("Error: ", e)
xml.read_string("<h1>%s</h1>" % str(e) )
root = xml.get_root_node()
parent = xml.get_parent(element)
xml.replace_child(parent, element, root)
return xml.to_string()
"""
# a simple readline interpreter
html = Html()
full_line = []
parse_context = None
for line in html_str.split("\n"):
line2 = line.strip()
#if not parse_context and not line2.startswith('<element '):
index = line2.find('<element>')
if index == -1:
index = line2.find('<element ')
if not parse_context and index == -1:
#line = Common.process_unicode_string(line)
html.writeln(line)
continue
if index != -1:
part1 = line2[:index]
html.write(part1)
line2 = line2[index:]
full_line.append(line2)
xml = Xml()
# determine if this is valid xml
try:
# create a complete config
full_line_str = "".join(full_line)
tmp_config = '''<config><tmp>%s</tmp></config>''' % full_line_str
xml.read_string(tmp_config, print_error=False)
full_line = []
parse_context = ''
except XmlException as e:
parse_context = 'element'
#raise e
continue
try:
if Xml.get_value(xml, "config/tmp/element/@enabled") == "false":
continue
element_wdg = self.get_element_wdg(xml, self.def_config)
if element_wdg:
element_html = element_wdg.get_buffer_display()
else:
element_html = ''
except Exception as e:
from pyasm.widget import ExceptionWdg
element_html = ExceptionWdg(e).get_buffer_display()
# Test to ensure that the html produced is "xml" conforming
"""
try:
new_xml = Xml()
new_xml.read_string(element_html)
except Exception as e:
f = open("/tmp/error", 'w')
f.write(element_html)
f.close()
#print(element_html)
print("Error: ", e)
"""
if element_html:
html.writeln(element_html)
sequence_wdg = self.get_sequence_wdg()
html.writeln(sequence_wdg.get_buffer_display() )
return html.to_string()
# FIXME: this is all necessary because CustomLayoutWdg is not derived from
# BaseTableElementWdg ... CustomLayoutWdg should probably not be used
    # as a table element
# NOTE: Use tactic.ui.table.CustomLayoutElementWdg for adding custom layouts
# to layouts
def set_parent_wdg(self, name):
pass
def is_in_column(self):
return True
def is_groupable(self):
return False
def set_layout_wdg(self, widget):
self.layout_wdg = widget
def get_layout_wdg(self):
return self.layout_wdg
def get_title(self):
'''Returns a widget containing the title to be displayed for this
column'''
if self.title:
title = self.title
return title
title = self.name
if not title:
title = ""
return title
title = Common.get_display_title(title)
return title
def get_value(self):
return None
def get_text_value(self):
'''for csv export'''
sobject = self.get_current_sobject()
        text_expr = self.kwargs.get("text_value")
        if not text_expr:
            # fall back to the sobject id when no text_value option is given
            text_expr = "@GET(.id)"
value = Search.eval(text_expr, sobject, single=True)
return value
def is_sortable(self):
return False
def is_searchable(self):
return False
def handle_th(self, th, xx=None):
pass
def handle_td(self, td):
pass
def handle_tr(self, tr):
pass
def is_editable(self):
return False
def get_bottom_wdg(self):
return None
def get_group_bottom_wdg(self, sobjects=None):
return None
def get_header_option_wdg(self):
return None
def get_generator(self):
return self.generator_element
def set_generator(self, element_name):
self.generator_element = element_name
## END TableElementWdg methods
def get_sequence_wdg(self):
funcs = []
div = DivWdg()
if not self.sequence_data:
return div
div.add_behavior( {
'type': 'load',
'data': self.sequence_data,
'cbjs_action': '''
var count = -1;
var func = function() {
if (count == bvr.data.length-1) {
return;
}
count += 1;
var item = bvr.data[count];
var unique_id = item.unique_id;
var class_name = item.class_name;
var kwargs = item.kwargs;
var options = {
async: true,
callback: func
}
spt.panel.load($(unique_id), class_name, kwargs, {}, options);
}
func();
'''
} )
return div
def get_async_element_wdg(self, xml, element_name, load):
tmp_config = WidgetConfig.get('tmp', xml=xml)
display_handler = tmp_config.get_display_handler(element_name)
display_options = tmp_config.get_display_options(element_name)
div = DivWdg()
unique_id = div.set_unique_id()
div.add_class("spt_manual_load")
show_loading = self.kwargs.get("show_loading")
if load == "sequence":
self.sequence_data.append( {
'class_name': display_handler,
'kwargs': display_options,
'unique_id': unique_id
} )
elif load == "manual":
show_loading = False
div.add_behavior( {
'type': 'load',
'class_name': display_handler,
'kwargs': display_options,
'cbjs_action': '''
bvr.src_el.load = function() {
spt.panel.async_load(bvr.src_el, bvr.class_name, bvr.kwargs);
}
'''
} )
msg = DivWdg()
div.add(msg)
msg.add_style("padding", "20px")
msg.add_style("margin", "10px auto")
msg.add_style("width", "150px")
msg.add_style("border", "solid 1px #DDD")
msg.add_style("text-align", "center")
msg.add("Loading ...")
else:
div.add_behavior( {
'type': 'load',
'class_name': display_handler,
'kwargs': display_options,
'cbjs_action': '''
spt.panel.async_load(bvr.src_el, bvr.class_name, bvr.kwargs);
'''
} )
if show_loading not in ["False", False, "false"]:
loading_div = DivWdg()
loading_div.add_style("margin: auto auto")
loading_div.add_style("width: 150px")
loading_div.add_style("text-align: center")
loading_div.add_style("padding: 20px")
div.add(loading_div)
loading_div.add('''<img src="/context/icons/common/indicator_snake.gif" border="0"/> <b>Loading ...</b>''')
return div
def get_element_wdg(self, xml, def_config):
element_node = xml.get_node("config/tmp/element")
attrs = Xml.get_attributes(element_node)
element_name = attrs.get("name")
widget = self.get_widget(element_name)
if widget:
return widget
if not element_name:
import random
num = random.randint(0, 1000000)
element_name = "element%s" % num
xml.set_attribute(element_node, "name", element_name)
# enable an ability to have a widget only loaded once in a request
if attrs.get('load_once') in ['true', True]:
widgets = Container.get("CustomLayoutWdg:widgets")
if widgets == None:
widgets = {}
Container.put("CustomLayoutWdg:widgets", widgets)
else:
                if widgets.get(element_name):
return None
widgets[element_name] = True
# provide the ability to have shorthand format
# ie: <element display_class="tactic.ui..." />
display_node = xml.get_node("config/tmp/element/display")
if display_node is None:
view = attrs.get("view")
type = attrs.get("type")
if type == "reference":
search_type = attrs.get("search_type")
self.config = WidgetConfigView.get_by_search_type(search_type, view)
# check if definition has no name. Don't use element_name
if not attrs.get("name"):
return
element_wdg = self.config.get_display_widget(element_name, extra_options=attrs)
container = DivWdg()
container.add(element_wdg)
return container
class_name = attrs.get("display_class")
# if no class name is defined and not view is defined look
# at predefined elements
if not view and not class_name:
element_wdg = self.config.get_display_widget(element_name, extra_options=attrs)
container = DivWdg()
container.add(element_wdg)
return container
# look at the attributes
if not class_name:
class_name = "tactic.ui.panel.CustomLayoutWdg"
display_node = xml.create_element("display")
xml.set_attribute(display_node, "class", class_name)
xml.append_child(element_node, display_node)
for name, value in attrs.items():
# replace the spt_ in the name.
# NOTE: should this be restricted to only spt_ attributes?
name = name.replace("spt_", "")
attr_node = xml.create_element(name)
xml.set_node_value(attr_node, value)
xml.append_child(display_node, attr_node)
load = attrs.get("load")
if load in ["none"]:
return None
elif load in ["async", "sequence","manual"]:
return self.get_async_element_wdg(xml, element_name, load)
# add the content
try:
view_node = xml.get_node("config/tmp/element/display/view")
if view_node is not None:
view = xml.get_node_value(view_node)
if view.startswith("."):
if self.view_folder:
xml.set_node_value(view_node, "%s%s" %(self.view_folder,view))
tmp_config = WidgetConfig.get('tmp', xml=xml)
configs = []
configs.append(tmp_config)
# add the def_config if it exists
if def_config:
configs.append(def_config)
config = WidgetConfigView('CustomLayoutWdg', 'tmp', configs, state=self.state)
# NOTE: this doesn't work too well when we go to an abasolute
# view.
parent_view = self.kwargs.get("parent_view")
if parent_view:
parent_view = parent_view.replace(".", "/")
parent_view = "%s/%s" % (parent_view, self.view)
else:
parent_view = self.view
# NOTE: need some protection code for infinite loops
includes = self.kwargs.get("include")
extra_options = {"parent_view": parent_view}
if includes:
extra_options['include'] = includes
element_wdg = config.get_display_widget(element_name, extra_options=extra_options)
element_top = element_wdg.get_top()
for name, value in attrs.items():
if name == 'class':
for item in value.split(" "):
element_top.add_class(item)
elif name == 'style':
for item in re.split(";\ ?", value):
element_top.add_style(item)
else:
element_top.set_attr(name, value)
# make a provision if this custom widget is in a table
if self.layout:
sobject = self.get_current_sobject()
element_wdg.set_sobject(sobject)
except Exception as e:
from pyasm.widget import ExceptionWdg
log = ExceptionWdg(e)
element_wdg = log
        return element_wdg
def get_smart_header_context_menu_data(self):
from pyasm.widget import IconWdg
menu_data = { 'menu_tag_suffix': 'MAIN', 'width': 200 }
opt_spec_list = []
opt_spec_list.append( {
"type": "action",
"label": "Edit Definition",
"icon": IconWdg.EDIT,
"bvr_cb": {
'cbjs_action': 'alert("Edit Definition")'
}
})
opt_spec_list.append( {
"type": "separator"
} )
opt_spec_list.append( {
"type": "action",
"label": "Split Horizontal",
"icon": IconWdg.TABLE_ROW_INSERT,
"bvr_cb": {
'cbjs_action': 'spt.custom_project.split_horizontal(evt, bvr)'
}
})
opt_spec_list.append( {
"type": "action",
"label": "Split Vertical",
"bvr_cb": {'cbjs_action': "spt.js_log.show();"}
})
opt_spec_list.append( {
"type": "action",
"label": "Remove Panel",
"icon": IconWdg.TABLE_ROW_DELETE,
"bvr_cb": {
'cbjs_action': 'spt.custom_project.remove_panel(evt, bvr)'
}
})
opt_spec_list.append( {
"type": "separator"
} )
opt_spec_list.append( {
"type": "action",
"label": "Save Layout",
"icon": IconWdg.SAVE,
"bvr_cb": {
'cbjs_action': 'spt.custom_project.save_layout(evt, bvr)'
}
})
menu_data['opt_spec_list'] = opt_spec_list
return menu_data
__all__.append("TestStateWdg")
class TestStateWdg(BaseRefreshWdg):
def get_display(self):
self.top.add(self.kwargs)
self.top.add("<hr/>")
if self.sobjects:
self.top.add(self.sobjects[0].get_code())
else:
self.top.add("No sobjects")
return self.top
# DEPRECATED
"""
class ContainerWdg(BaseRefreshWdg):
def get_args_keys(self):
return {
'inner_width': 'Inner width, sans rounded corner wrapper ... numeric value only',
'inner_height': 'Inner height, sans rounded corner wrapper ... numeric value only',
'show_resize_scroll': 'true|false: determines whether to show scroll bars or not'
}
def init(self):
self.top = DivWdg()
self.content_wdg = DivWdg()
is_IE = WebContainer.get_web().is_IE()
# get the width and height of the contents (the inner part of the container) ...
self.inner_width = self.kwargs.get('inner_width')
self.inner_height = self.kwargs.get('inner_height')
if self.inner_width:
self.inner_width = int(self.inner_width)
if is_IE:
self.inner_width -= 20 # adjust for rounded corner wrapper
else:
self.inner_width = 600
if self.inner_height:
self.inner_height = int(self.inner_height)
if is_IE:
self.inner_height -= 20 # adjust for rounded corner wrapper
else:
self.inner_height = 200
# Now place a ResizeScrollWdg within a RoundedCornerDivWdg ... the ResizeScrollWdg will contain
# the actual contents of this container, so that the contents can be scrolled and resized ...
#
from tactic.ui.container import RoundedCornerDivWdg
color = self.top.get_color("background")
self.rc_wdg = RoundedCornerDivWdg(hex_color_code=color,corner_size=10)
#show_scrollbars = self.kwargs.get("show_resize_scroll")
#if show_scrollbars in ['', 'false']:
# self.inner_wdg = DivWdg()
#else:
# from tactic.ui.container import ResizeScrollWdg
# self.inner_wdg = ResizeScrollWdg( width=self.inner_width, height=self.inner_height, scroll_bar_size_str='medium', scroll_expansion='inside' )
self.inner_wdg = DivWdg()
self.inner_wdg.add_style("width: %s" % self.inner_width)
self.inner_wdg.add_style("height: %s" % self.inner_height)
self.inner_wdg.add_style("overflow-y: auto")
self.inner_wdg.add_style("overflow-x: hidden")
self.rc_wdg.add( self.inner_wdg )
self.content_wdg.add(self.rc_wdg)
self.table = Table(css="minimal")
self.table.add_row()
self.content_td = self.table.add_cell()
self.content_td.add_class("spt_content")
self.content_td.add_style('padding: 2px')
def add_style(self, name, value=None):
if name.startswith("height"):
self.content_td.add_style(name, value)
elif name.startswith("width"):
self.content_td.add_style(name, value)
else:
self.top.add_style(name, value)
def get_display(self):
# fill in the content widget
for widget in self.widgets:
self.inner_wdg.add(widget)
self.top.add_class("spt_container")
self.content_wdg.add_style("float: left")
# -- DO NOT SET THE WIDTH AND HEIGHT of the content_wdg! Commenting out these lines ...
# self.content_wdg.add_style("width: 100%")
# self.content_wdg.add_style("height: 100%")
# add the content
self.content_td.add_style("vertical-align: top")
self.content_td.add(self.content_wdg)
self.top.add(self.table)
return self.top
def get_divider_wdg(self, activator, mode='vertical'):
divider_div = DivWdg()
divider_div.add_style("border-style", "dashed")
divider_div.add_style("border-color", "#999")
if mode == 'vertical':
divider_div.add_style("margin-left", "3px")
divider_div.add_style("height", "100%")
divider_div.add_style("width", "1px")
divider_div.add_style("border-width", "0 0 0 1px")
else:
divider_div.add_style("margin-top", "3px")
divider_div.add_style("width", "100%")
divider_div.add_style("height", "1px")
divider_div.add_style("border-width", "1px 0 0 0")
divider_div.add_class("hand")
divider_div.add_class("content")
divider_div.add_style("display", "none")
activator.add_event("onmouseover", "$(this).getElement('.content').setStyle('display', '');")
activator.add_event("onmouseout", "$(this).getElement('.content').setStyle('display', 'none');")
return divider_div
"""
class SObjectHeaderWdg(BaseRefreshWdg):
def get_args_keys(self):
return {
"parent_key": "the search key of the sobject that the header will display"
}
def get_display(self):
search_key = self.kwargs.get('parent_key')
div = DivWdg()
if not search_key:
div.add("Search Key for SObjectHeaderHdg is empty")
return div
sobject = Search.get_by_search_key( search_key )
if not sobject:
div.add("SObject with Search Key [%s] does not exist" % search_key)
return div
search_type_obj = sobject.get_search_type_obj()
title = search_type_obj.get_title()
title_wdg = DivWdg()
title_wdg.add_style("font-size: 1.8em")
name = sobject.get_display_value()
title_wdg.add("%s: %s" % (title, name ))
div.add(title_wdg)
div.add(HtmlElement.hr())
return div
| epl-1.0 | -1,024,281,543,555,688,800 | 30.318209 | 166 | 0.519075 | false |
scikit-learn-contrib/categorical-encoding | category_encoders/__init__.py | 1 | 1448 | """
.. module:: category_encoders
   :synopsis: scikit-learn-compatible categorical variable encoders
:platform:
"""
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.binary import BinaryEncoder
from category_encoders.count import CountEncoder
from category_encoders.hashing import HashingEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.one_hot import OneHotEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.polynomial import PolynomialEncoder
from category_encoders.basen import BaseNEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.glmm import GLMMEncoder
__version__ = '2.2.2'
__author__ = 'willmcginnis'
__all__ = [
'BackwardDifferenceEncoder',
'BinaryEncoder',
'CountEncoder',
'HashingEncoder',
'HelmertEncoder',
'OneHotEncoder',
'OrdinalEncoder',
'SumEncoder',
'PolynomialEncoder',
'BaseNEncoder',
'LeaveOneOutEncoder',
'TargetEncoder',
'WOEEncoder',
'MEstimateEncoder',
'JamesSteinEncoder',
'CatBoostEncoder',
'GLMMEncoder'
]
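
# Example usage (a minimal sketch; assumes pandas is installed and follows the
# standard scikit-learn fit/transform workflow):
#
#     import pandas as pd
#     import category_encoders as ce
#
#     X = pd.DataFrame({'color': ['red', 'blue', 'red']})
#     encoder = ce.OrdinalEncoder(cols=['color'])
#     X_encoded = encoder.fit_transform(X)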
| bsd-3-clause | -7,168,020,823,898,292,000 | 28.55102 | 75 | 0.785221 | false |
mouton5000/DiscreteEventApplicationEditor | game/Registeries/SpriteRegistery.py | 1 | 2662 | from pygame.rect import Rect
__author__ = 'mouton'
from pygame.sprite import Sprite
import pygame
from collections import defaultdict
from copy import copy
_rootDir = None
_spritesList = defaultdict(pygame.sprite.OrderedUpdates)
_rectsToUpdate = []
def init(rootDir):
global _rootDir
_rootDir = rootDir
reinit()
def reinit():
_spritesList.clear()
del _rectsToUpdate[:]
def getLayers():
return iter(_spritesList.keys())
def draw(z, scene):
_spritesList[z].draw(scene)
def addRectToUpdate(rectToUpdate):
_rectsToUpdate.append(rectToUpdate)
def getRectsToUpdate():
return _rectsToUpdate
def clearRectsToUpdate():
del _rectsToUpdate[:]
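
# Typical per-frame usage (a sketch; `scene` is assumed to be the pygame
# surface owned by the game window module used elsewhere in this project):
#
#     for z in sorted(getLayers()):
#         draw(z, scene)
#     pygame.display.update(getRectsToUpdate())
#     clearRectsToUpdate()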
class SpriteReg(Sprite):
def __init__(self, fileName, x, y, z, rotate, scale):
Sprite.__init__(self)
self.fileName = None
self.z = None
self.rect = None
self.reload(fileName, x, y, z, rotate, scale)
def reload(self, fileName, x, y, z, rotate, scale):
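        """Load (or re-transform) the sprite image, position it, and queue the
        union of the old and new bounding rectangles for redraw."""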
filePath = _rootDir + '/' + fileName
import game.gameWindow as gameWindow
scene = gameWindow.getScene()
prevRect = copy(self.rect)
if self.fileName is None or self.fileName != fileName or rotate != 0 or scale != 1:
self.fileName = fileName
self.image = pygame.image.load(filePath).convert_alpha(scene)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
if rotate != 0 or scale != 1:
self.image = pygame.transform.rotozoom(self.image, rotate, scale)
transformedRect = self.image.get_rect()
transformedRect.center = self.rect.center
self.rect = transformedRect
if prevRect is not None:
rectToUpdate = Rect(prevRect.x - 1, prevRect.y - 1, prevRect.width + 2, prevRect.height + 2)
r2 = Rect(self.rect.x - 1, self.rect.y - 1, self.rect.width + 2, self.rect.height + 2)
rectToUpdate.union_ip(r2)
addRectToUpdate(rectToUpdate)
else:
rectToUpdate = Rect(self.rect.x - 1, self.rect.y - 1, self.rect.width + 2, self.rect.height + 2)
addRectToUpdate(rectToUpdate)
if self.z is not None:
self.remove()
_spritesList[z].add(self)
self.z = z
def __str__(self):
return str((self.fileName, self.rect))
def __repr__(self):
return str((self.fileName, self.rect))
def remove(self):
_spritesList[self.z].remove(self)
rectToUpdate = Rect(self.rect.x - 1, self.rect.y - 1, self.rect.width + 2, self.rect.height + 2)
addRectToUpdate(rectToUpdate) | mit | 7,548,703,405,394,868,000 | 25.89899 | 108 | 0.614576 | false |
CVSoft/UTQuery | Demo_GSQuery.py | 1 | 3672 | from time import sleep
import GSQuery
# Let's pick a server. We'll use TeamRectifier as they're usually populated.
gs = GSQuery.GSServer('31.186.250.42')
# Let's get the basic server details with the GameSpy query protocol.
# The query methods return dictionary types, so we can store them for later use
# instead of having to ask the server every time we want to know something.
try: gs_bsd = gs.parse_query()
# Sometimes, our packets get lost, or the server is restarting. In that case,
# we can just wait a few seconds, try again, and hope our query is returned.
except Exception:
sleep(5)
gs_bsd = gs.parse_query()
# and find out the server's name
print "Server Name :", gs_bsd["hostname"]
# Now let's see what map they're on
print "Map Name :", gs_bsd["mapname"]
# But what are they playing? (Assume the server name didn't make this obvious.)
# Let's see what game type is active.
print "Gametype :", gs_bsd["gametype"]
# What game version do they use?
print "Game Version:", gs_bsd["gamever"]
#a little cleanup for what follows...
print "\n====\n"
# Why do all of these methods start with parse? This is because they take a
# `query` argument, which is a raw query returned by UTServer.query().
# Specifying the `query` argument is optional, and the method will send the
# necessary type of query needed if one is not provided.
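# For example, these two calls would be equivalent (hypothetical sketch,
# assuming GSServer exposes the same raw query() method as UTServer):
#
#     gs_bsd = gs.parse_query()
#     gs_bsd = gs.parse_query(query=gs.query())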
################################################################################
# Unlike the query method used above, the player query method does not return a
# dictionary of key-value pairs, but rather a list of UTPlayer objects.
#
# UTPlayer objects have six attributes:
# - Name, which is the colored name shown in-game, if colored names are used.
# - Score
# - Ping, in milliseconds. This ping value is the one shown in-game.
# - Team, which for team games is (red=0, blue=1). For DeathMatch, all players
# have a team value of 0. Unlike UTQuery, spectators are not shown at all.
# - Player ID, which is simply the player's index in the GameSpy query response.
# - Stats ID, which the GameSpy protocol doesn't implement and is set to None.
#
# We can access these values through their values:
# name, score, ping, team, pid, sid
# respectively.
#
# Let's start with getting the online player list.
gs_players = gs.parse_players()
# If we get an empty list, one of two things happened: either no players are
# online, or our query was not returned. Retrying the query would tell the two
# cases apart, but that check isn't implemented in this demo yet.
# Now let's display their information. We really only care about name, score,
# team, and ping. Since we are requesting information from a TeamArenaMaster
# server, we are going to assume teams are present. For a DeathMatch server,
# all players have a team value of 0, since there are no teams.
# First, we should check if players are online.
if len(gs_players) > 0:
#If there are, let's display some information about them.
print "Online Players:"
for p in gs_players:
# Skip anything with a ping of 0, as they're probably not real players.
# Team scores appear as players with a ping of 0.
if p.ping == 0: continue
# Translate the team number to English. The rest are just numbers.
team = ["red", "blue"][p.team]
# Show their name, score, and ping.
print p.name + " is on " + team + " with a score of " + str(p.score) + \
" and a ping of " + str(p.ping) + "ms."
# If we didn't find anyone online, we go here.
else:
print "No online players!"
| gpl-3.0 | -4,055,352,095,003,567,600 | 40.697674 | 80 | 0.668573 | false |
ray-project/ray | python/ray/util/collective/tests/single_node_cpu_tests/test_reducescatter.py | 1 | 5148 | """Test the collective reducescatter API."""
import pytest
import ray
import numpy as np
import torch
from ray.util.collective.types import Backend
from ray.util.collective.tests.cpu_util import create_collective_workers, \
init_tensors_for_gather_scatter
@pytest.mark.parametrize("backend", [Backend.GLOO])
@pytest.mark.parametrize("tensor_backend", ["numpy", "torch"])
@pytest.mark.parametrize("array_size",
[2, 2**5, 2**10, 2**15, 2**20, [2, 2], [5, 5, 5]])
def test_reducescatter_different_array_size(ray_start_single_node, array_size,
tensor_backend, backend):
world_size = 2
actors, _ = create_collective_workers(world_size, backend=backend)
init_tensors_for_gather_scatter(
actors, array_size=array_size, tensor_backend=tensor_backend)
results = ray.get([a.do_reducescatter.remote() for a in actors])
for i in range(world_size):
if tensor_backend == "numpy":
assert (results[i] == np.ones(array_size, dtype=np.float32) *
world_size).all()
else:
assert (results[i] == torch.ones(array_size, dtype=torch.float32) *
world_size).all()
@pytest.mark.parametrize("backend", [Backend.GLOO])
@pytest.mark.parametrize("dtype",
[np.uint8, np.float16, np.float32, np.float64])
def test_reducescatter_different_dtype(ray_start_single_node, dtype, backend):
world_size = 2
actors, _ = create_collective_workers(world_size, backend=backend)
init_tensors_for_gather_scatter(actors, dtype=dtype)
results = ray.get([a.do_reducescatter.remote() for a in actors])
    for i in range(world_size):
        assert (results[i] == np.ones(10, dtype=dtype) * world_size).all()
@pytest.mark.parametrize("backend", [Backend.GLOO])
def test_reducescatter_torch_numpy(ray_start_single_node, backend):
world_size = 2
shape = [10, 10]
actors, _ = create_collective_workers(world_size, backend=backend)
# tensor is pytorch, list is numpy
for i, a in enumerate(actors):
t = torch.ones(shape, dtype=torch.float32) * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = [
np.ones(shape, dtype=np.float32) for _ in range(world_size)
]
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_reducescatter.remote() for a in actors])
for i in range(world_size):
assert (results[i] == torch.ones(shape, dtype=torch.float32) *
world_size).all()
# tensor is numpy, list is pytorch
for i, a in enumerate(actors):
t = np.ones(shape, dtype=np.float32) * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = [
torch.ones(shape, dtype=torch.float32) for _ in range(world_size)
]
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_reducescatter.remote() for a in actors])
for i in range(world_size):
assert (
results[i] == np.ones(shape, dtype=np.float32) * world_size).all()
# some tensors in the list are pytorch, some are numpy
for i, a in enumerate(actors):
if i % 2 == 0:
t = torch.ones(shape, dtype=torch.float32) * (i + 1)
else:
t = np.ones(shape, dtype=np.float32) * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = []
for j in range(world_size):
if j % 2 == 0:
list_buffer.append(torch.ones(shape, dtype=torch.float32))
else:
list_buffer.append(np.ones(shape, dtype=np.float32))
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_reducescatter.remote() for a in actors])
for i in range(world_size):
if i % 2 == 0:
assert (results[i] == torch.ones(shape, dtype=torch.float32) *
world_size).all()
else:
assert (results[i] == np.ones(shape, dtype=np.float32) *
world_size).all()
    # mixed case: same as the previous block, but with the torch/numpy parity
    # of the list buffers swapped
for i, a in enumerate(actors):
if i % 2 == 0:
t = torch.ones(shape, dtype=torch.float32) * (i + 1)
else:
t = np.ones(shape, dtype=np.float32) * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = []
for j in range(world_size):
if j % 2 == 0:
list_buffer.append(np.ones(shape, dtype=np.float32))
else:
list_buffer.append(torch.ones(shape, dtype=torch.float32))
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_reducescatter.remote() for a in actors])
for i in range(world_size):
if i % 2 == 0:
assert (results[i] == torch.ones(shape, dtype=torch.float32) *
world_size).all()
else:
assert (results[i] == np.ones(shape, dtype=np.float32) *
world_size).all()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| apache-2.0 | -7,122,715,093,684,820,000 | 39.21875 | 79 | 0.583528 | false |
kinooo/Sick-Beard | sickbeard/notifiers/pushbullet.py | 1 | 4935 | # Author: Pedro Correia (http://github.com/pedrocorreia/)
# Based on pushalot.py by Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import base64
from httplib import HTTPSConnection, HTTPException
from urllib import urlencode
from ssl import SSLError
import sickbeard
from sickbeard import logger, common
class PushbulletNotifier:
def test_notify(self, pushbullet_api):
return self._sendPushbullet(pushbullet_api, event="Test", message="Testing Pushbullet settings from Sick Beard", method="POST", notificationType="note", force=True)
def get_devices(self, pushbullet_api):
return self._sendPushbullet(pushbullet_api, method="GET", force=True)
def notify_snatch(self, ep_name):
if sickbeard.PUSHBULLET_NOTIFY_ONSNATCH:
self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_SNATCH], message=ep_name, notificationType="note", method="POST")
def notify_download(self, ep_name):
if sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD:
self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD], message=ep_name, notificationType="note", method="POST")
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], message=ep_name + ": " + lang, notificationType="note", method="POST")
def _sendPushbullet(self, pushbullet_api=None, pushbullet_device=None, event=None, message=None, notificationType=None, method=None, force=False):
if not sickbeard.USE_PUSHBULLET and not force:
return False
        if pushbullet_api is None:
            pushbullet_api = sickbeard.PUSHBULLET_API
        if pushbullet_device is None:
            pushbullet_device = sickbeard.PUSHBULLET_DEVICE
if method == 'POST':
uri = '/v2/pushes'
else:
uri = '/api/devices'
logger.log(u"Pushbullet event: " + str(event), logger.DEBUG)
logger.log(u"Pushbullet message: " + str(message), logger.DEBUG)
logger.log(u"Pushbullet api: " + str(pushbullet_api), logger.DEBUG)
logger.log(u"Pushbullet devices: " + str(pushbullet_device), logger.DEBUG)
logger.log(u"Pushbullet notification type: " + str(notificationType), logger.DEBUG)
http_handler = HTTPSConnection("api.pushbullet.com")
authString = base64.encodestring('%s:' % (pushbullet_api)).replace('\n', '')
        if notificationType is None:
testMessage = True
try:
logger.log(u"Testing Pushbullet authentication and retrieving the device list.", logger.DEBUG)
                http_handler.request(method, uri, None, headers={'Authorization': 'Basic %s' % authString})
except (SSLError, HTTPException):
logger.log(u"Pushbullet notification failed.", logger.ERROR)
return False
else:
testMessage = False
try:
data = {
'title': event.encode('utf-8'),
'body': message.encode('utf-8'),
'device_iden': pushbullet_device,
'type': notificationType}
                http_handler.request(method, uri, body=urlencode(data), headers={'Authorization': 'Basic %s' % authString})
except (SSLError, HTTPException):
return False
response = http_handler.getresponse()
request_body = response.read()
request_status = response.status
if request_status == 200:
if testMessage:
return request_body
else:
logger.log(u"Pushbullet notifications sent.", logger.DEBUG)
return True
elif request_status == 410:
logger.log(u"Pushbullet auth failed: %s" % response.reason, logger.ERROR)
return False
else:
logger.log(u"Pushbullet notification failed.", logger.ERROR)
return False
notifier = PushbulletNotifier
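# A hypothetical usage sketch (Sick Beard normally drives `notifier` itself;
# the API key below is a placeholder):
#
#     result = notifier().test_notify('YOUR_PUSHBULLET_API_KEY')
#     devices = notifier().get_devices('YOUR_PUSHBULLET_API_KEY')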
| gpl-3.0 | -8,564,425,933,284,914,000 | 43.0625 | 185 | 0.643364 | false |
otsaloma/gaupol | aeidon/pattern.py | 1 | 1548 | # -*- coding: utf-8 -*-
# Copyright (C) 2007 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Regular expression substitution for subtitle text."""
import aeidon
import re
__all__ = ("Pattern",)
class Pattern(aeidon.MetadataItem):
"""
Regular expression substitution for subtitle text.
:ivar enabled: ``True`` if pattern should be used, ``False`` if not
:ivar fields: Dictionary of all data field names and values
:ivar local: ``True`` if pattern is defined by user, ``False`` if system
"""
def __init__(self, fields=None):
"""Initialize a :class:`Pattern` instance."""
aeidon.MetadataItem.__init__(self, fields)
self.enabled = True
self.local = False
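    # A minimal sketch of how a pattern's flags are typically consumed; the
    # ``Flags`` field value shown is an assumption about the metadata format:
    #
    #     pattern = Pattern({'Flags': 'DOTALL'})
    #     regex = re.compile(r'<i>(.*?)</i>', pattern.get_flags())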
def get_flags(self):
"""Return the evaluated value of the ``Flags`` field."""
flags = 0
for name in self.get_field_list("Flags"):
flags = flags | getattr(re, name)
return flags
| gpl-3.0 | 660,636,134,076,191,900 | 31.93617 | 76 | 0.677649 | false |
fernandog/Medusa | ext/sqlalchemy/engine/__init__.py | 1 | 20438 | # engine/__init__.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from .interfaces import (
Connectable,
CreateEnginePlugin,
Dialect,
ExecutionContext,
ExceptionContext,
# backwards compat
Compiled,
TypeCompiler
)
from .base import (
Connection,
Engine,
NestedTransaction,
RootTransaction,
Transaction,
TwoPhaseTransaction,
)
from .result import (
BaseRowProxy,
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
FullyBufferedResultProxy,
ResultProxy,
RowProxy,
)
from .util import (
connection_memoize
)
from . import util, strategies
# backwards compat
from ..sql import ddl
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
.. versionchanged:: 0.8
By default, result row names match case-sensitively.
In version 0.7 and prior, all matches were case-insensitive.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param empty_in_strategy: The SQL compilation strategy to use when
rendering an IN or NOT IN expression for :meth:`.ColumnOperators.in_`
where the right-hand side
is an empty set. This is a string value that may be one of
``static``, ``dynamic``, or ``dynamic_warn``. The ``static``
strategy is the default, and an IN comparison to an empty set
will generate a simple false expression "1 != 1". The ``dynamic``
strategy behaves like that of SQLAlchemy 1.1 and earlier, emitting
a false expression of the form "expr != expr", which has the effect
      of evaluating to NULL in the case of a null expression.
``dynamic_warn`` is the same as ``dynamic``, however also emits a
      warning when an empty set is encountered; this is because the "dynamic"
comparison is typically poorly performing on most databases.
.. versionadded:: 1.2 Added the ``empty_in_strategy`` setting and
additionally defaulted the behavior for empty-set IN comparisons
to a static boolean expression.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
      configured to handle unicode to the greatest
      degree that is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a per-:class:`.Connection`
basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://docs.sqlalchemy.org/en/latest/faq/metadata_schema.html#how-can-i-get-the-create-table-drop-table-output-as-a-string>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
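# A short sketch of the ``mock`` strategy described above: statements are not
# executed against a database, only handed to the ``executor`` callable, which
# is handy for dumping DDL as strings. The callable below is illustrative.
#
#     def dump(sql, *multiparams, **params):
#         print(sql.compile(dialect=engine.dialect))
#
#     engine = create_engine('postgresql://', strategy='mock', executor=dump)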
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:ref:`create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
"""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
options['_coerce_config'] = True
options.update(kwargs)
url = options.pop('url')
return create_engine(url, **options)
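# A minimal usage sketch for engine_from_config; the dictionary contents are
# illustrative, and only ``sqlalchemy.url`` is required:
#
#     config = {
#         'sqlalchemy.url': 'postgresql://scott:tiger@localhost/test',
#         'sqlalchemy.echo': 'false',
#         'sqlalchemy.pool_recycle': '3600',
#     }
#     engine = engine_from_config(config, prefix='sqlalchemy.')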
__all__ = (
'create_engine',
'engine_from_config',
)
| gpl-3.0 | 5,816,460,592,037,326,000 | 42.392781 | 137 | 0.683139 | false |
Workday/OpenFrame | tools/telemetry/catapult_base/dependency_manager/cloud_storage_info.py | 1 | 3811 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import stat
from catapult_base import cloud_storage
from catapult_base.dependency_manager import exceptions
class CloudStorageInfo(object):
def __init__(self, cs_bucket, cs_hash, download_path, cs_remote_path,
version_in_cs=None, archive_info=None):
""" Container for the information needed to download a dependency from
cloud storage.
Args:
cs_bucket: The cloud storage bucket the dependency is located in.
cs_hash: The hash of the file stored in cloud storage.
download_path: Where the file should be downloaded to.
cs_remote_path: Where the file is stored in the cloud storage bucket.
version_in_cs: The version of the file stored in cloud storage.
archive_info: An instance of ArchiveInfo if this dependency is an
archive. Else None.
"""
self._download_path = download_path
self._cs_remote_path = cs_remote_path
self._cs_bucket = cs_bucket
self._cs_hash = cs_hash
self._version_in_cs = version_in_cs
self._archive_info = archive_info
if not self._has_minimum_data:
raise ValueError(
'Not enough information specified to initialize a cloud storage info.'
' %s' % self)
def GetRemotePath(self):
"""Gets the path to a downloaded version of the dependency.
May not download the file if it has already been downloaded.
Will unzip the downloaded file if a non-empty archive_info was passed in at
init.
Returns: A path to an executable that was stored in cloud_storage, or None
if not found.
Raises:
CredentialsError: If cloud_storage credentials aren't configured.
PermissionError: If cloud_storage credentials are configured, but not
with an account that has permission to download the needed file.
NotFoundError: If the needed file does not exist where expected in
cloud_storage or the downloaded zip file.
ServerError: If an internal server error is hit while downloading the
needed file.
      CloudStorageError: If another error occurred while downloading the remote
path.
FileNotFoundError: If the download was otherwise unsuccessful.
"""
if not self._has_minimum_data:
return None
download_dir = os.path.dirname(self._download_path)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
dependency_path = self._download_path
cloud_storage.GetIfHashChanged(
self._cs_remote_path, self._download_path, self._cs_bucket,
self._cs_hash)
if not os.path.exists(dependency_path):
raise exceptions.FileNotFoundError(dependency_path)
    logging.debug('has archive_info %s', self._archive_info)
if self.has_archive_info:
dependency_path = self._archive_info.GetUnzippedPath()
else:
mode = os.stat(dependency_path).st_mode
os.chmod(dependency_path, mode | stat.S_IXUSR)
return os.path.abspath(dependency_path)
@property
def version_in_cs(self):
return self._version_in_cs
@property
def _has_minimum_data(self):
return all([self._cs_bucket, self._cs_remote_path, self._download_path,
self._cs_hash])
@property
def has_archive_info(self):
return bool(self._archive_info)
def __repr__(self):
return (
'CloudStorageInfo(download_path=%s, cs_remote_path=%s, cs_bucket=%s, '
'cs_hash=%s, version_in_cs=%s, archive_info=%s)' % (
self._download_path, self._cs_remote_path, self._cs_bucket,
self._cs_hash, self._version_in_cs, self._archive_info))
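# A hypothetical construction sketch; the bucket, hash, and paths below are
# placeholders:
#
#     info = CloudStorageInfo(
#         cs_bucket='my-deps-bucket', cs_hash='0123abcd',
#         download_path='/tmp/deps/tool', cs_remote_path='tools/tool_0123abcd')
#     local_path = info.GetRemotePath()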
| bsd-3-clause | 6,174,460,862,816,630,000 | 36.362745 | 80 | 0.674888 | false |
smallyear/linuxLearn | salt/salt/states/reg.py | 1 | 13518 | # -*- coding: utf-8 -*-
r'''
===========================
Manage the Windows registry
===========================
Many python developers think of registry keys as if they were python keys in a
dictionary which is not the case. The windows registry is broken down into the
following components:
-----
Hives
-----
This is the top level of the registry. They all begin with HKEY.
- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER(HKCU)
- HKEY_LOCAL MACHINE (HKLM)
- HKEY_USER (HKU)
- HKEY_CURRENT_CONFIG
----
Keys
----
Hives contain keys. These are basically the folders beneath the hives. They can
contain any number of subkeys.
-----------------
Values or Entries
-----------------
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. It is usually "(Default)"="(value not set)". The
actual value for the name and the data is Null. The registry editor will display
"(Default)" and "(value not set)".
-------
Example
-------
The following example is taken from the windows startup portion of the registry:
.. code-block:: text

    [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
    "RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
    "NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
    "BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
In this example these are the values for each:
Hive: `HKEY_LOCAL_MACHINE`
Key and subkeys: `SOFTWARE\Microsoft\Windows\CurrentVersion\Run`
Value:
- There are 3 value names: `RTHDVCPL`, `NvBackend`, and `BTMTrayAgent`
- Each value name has a corresponding value
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Load this state if the reg module exists
'''
return 'reg' if 'reg.read_key' in __salt__ else False
def _parse_key_value(key):
'''
    Split a full registry path into the hive, the key path, and the value name.
'''
splt = key.split("\\")
hive = splt.pop(0)
vname = splt.pop(-1)
key = '\\'.join(splt)
return hive, key, vname
def _parse_key(key):
'''
split the hive from the key
'''
splt = key.split("\\")
hive = splt.pop(0)
key = '\\'.join(splt)
return hive, key
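# For example (hypothetical input), the two helpers above split a full
# registry path like this:
#
#     _parse_key_value('HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version')
#     # -> ('HKEY_CURRENT_USER', 'SOFTWARE\\Salt', 'version')
#     _parse_key('HKEY_CURRENT_USER\\SOFTWARE\\Salt')
#     # -> ('HKEY_CURRENT_USER', 'SOFTWARE\\Salt')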
def present(name,
value=None,
vname=None,
vdata=None,
vtype='REG_SZ',
reflection=True,
use_32bit_registry=False):
'''
Ensure a registry key or value is present.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str value: Deprecated. Use vname and vdata instead. Included here for
backwards compatibility.
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
:param str vdata: The value you'd like to set for the Key. If a value name
(vname) is passed, this will be the data for that value name. If not, this
will be the (Default) value for the key.
The type for the (Default) value is always REG_SZ and cannot be changed.
This parameter is optional. If not passed, the Key will be created with no
associated item/value pairs.
:param str vtype: The value type for the data you wish to store in the
registry. Valid values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_SZ (Default)
:param bool reflection: On 64 bit machines a duplicate value will be created
in the ``Wow6432Node`` for 32bit programs. This only applies to the SOFTWARE
key. This option is ignored on 32bit operating systems. This value defaults
to True. Set it to False to disable reflection.
.. deprecated:: 2015.8.2
Use `use_32bit_registry` instead.
The parameter seems to have no effect since Windows 7 / Windows 2008R2
removed support for reflection. The parameter will be removed in Boron.
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default if False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``0.15.3``. The
value will not be reflected in ``Wow6432Node``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 0.15.3
- reflection: False
The following example will set the value for the ``version`` entry under the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``0.15.3``. The
value will be reflected in ``Wow6432Node``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 0.15.3
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# This is for backwards compatibility
# If 'value' is passed a value, vdata becomes value and the vname is
# obtained from the key path
if value or value in [0, '']:
hive, key, vname = _parse_key_value(name)
vdata = value
ret['comment'] = 'State file is using deprecated syntax. Please update.'
salt.utils.warn_until(
'Boron',
'The \'value\' argument has been deprecated. '
'Please use vdata instead.'
)
else:
hive, key = _parse_key(name)
# Determine what to do
reg_current = __salt__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already configured'.\
format(vname if vname else '(Default)', name)
return ret
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(vname if vname else '(Default)'),
'Value': '{0}'.format(vdata)}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
# Configure the value
ret['result'] = __salt__['reg.set_value'](hive=hive,
key=key,
vname=vname,
vdata=vdata,
vtype=vtype,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
return ret
def absent(name, vname=None, use_32bit_registry=False):
'''
Ensure a registry value is removed. To remove a key use key_absent.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default if False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
CLI Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version':
reg.absent
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``version`` is the value name
So the value ``version`` will be deleted from the ``SOFTWARE\\Salt`` key in
the ``HKEY_CURRENT_USER`` hive.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_check = __salt__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not reg_check['success'] or reg_check['vdata'] == '(value not set)':
if not vname:
hive, key, vname = _parse_key_value(name)
reg_check = __salt__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not reg_check['success'] or reg_check['vdata'] == '(value not set)':
ret['comment'] = '{0} is already absent'.format(name)
return ret
else:
ret['comment'] = '{0} is already absent'.format(name)
return ret
remove_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(vname if vname else '(Default)')}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will remove': remove_change}}
return ret
# Delete the value
ret['result'] = __salt__['reg.delete_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive)
else:
ret['changes'] = {'reg': {'Removed': remove_change}}
ret['comment'] = r'Removed {0} from {1}'.format(key, hive)
return ret
def key_absent(name, force=False, use_32bit_registry=False):
r'''
.. versionadded:: 2015.5.4
Ensure a registry key is removed. This will remove a key and all value
entries it contains. It will fail if the key contains subkeys.
:param str name: A string representing the full path to the key to be
removed to include the hive and the keypath. The hive can be any of the following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
    - HKEY_USERS or HKU
:param bool force: A boolean value indicating that all subkeys should be
deleted with the key. If force=False and subkeys exists beneath the key you
want to delete, key_absent will fail. Use with caution. The default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will delete the ``SOFTWARE\Salt`` key and all subkeys
under the ``HKEY_CURRENT_USER`` hive.
Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\SOFTWARE\Salt':
reg.key_absent:
- force: True
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\Salt`` is the key
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
if not __salt__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {
'Removed': {
'Key': r'{0}\{1}'.format(hive, key)
}}}
# Check for test option
if __opts__['test']:
ret['result'] = None
return ret
# Delete the value
__salt__['reg.delete_key_recursive'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)
if __salt__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
return ret
| apache-2.0 | 7,227,832,016,917,860,000 | 31.731235 | 99 | 0.575159 | false |
Naakh/naakh-py | naakh/api_client.py | 1 | 20321 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
from . import models
from .rest import RESTClientObject
from .rest import ApiException
import os
import re
import sys
import urllib
import json
import mimetypes
import random
import tempfile
import threading
from datetime import datetime
from datetime import date
# python 2 and python 3 compatibility library
from six import iteritems
try:
# for python3
from urllib.parse import quote
except ImportError:
# for python2
from urllib import quote
from .configuration import Configuration
class ApiClient(object):
"""
Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param host: The base path for the server to call.
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to the API.
"""
def __init__(self, host=None, header_name=None, header_value=None, cookie=None):
"""
Constructor of the class.
"""
self.rest_client = RESTClientObject()
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
if host is None:
self.host = Configuration().host
else:
self.host = host
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Python-Swagger/0.0.3'
@property
def user_agent(self):
"""
Gets user agent.
"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""
Sets user agent.
"""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None):
# headers parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
for k, v in iteritems(path_params):
replacement = quote(str(self.to_path_value(v)))
resource_path = resource_path.\
replace('{' + k + '}', replacement)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = {k: self.to_path_value(v)
for k, v in iteritems(query_params)}
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.host + resource_path
# perform request and return response
response_data = self.request(method, url,
query_params=query_params,
headers=header_params,
post_params=post_params, body=body)
self.last_response = response_data
# deserialize response data
if response_type:
deserialized_data = self.deserialize(response_data, response_type)
else:
deserialized_data = None
if callback:
callback(deserialized_data)
else:
return deserialized_data
def to_path_value(self, obj):
"""
        Takes a value and turns it into a string suitable for inclusion in
        the path, by url-encoding.
:param obj: object or string value.
:return string: quoted value.
"""
if type(obj) == list:
return ','.join(obj)
else:
return str(obj)
def sanitize_for_serialization(self, obj):
"""
Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
types = (str, int, float, bool, tuple)
if sys.version_info < (3,0):
types = types + (unicode,)
if isinstance(obj, type(None)):
return None
elif isinstance(obj, types):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
else:
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""
Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
            deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if "file" == response_type:
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""
Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match('list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match('dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in iteritems(data)}
# convert str to class
# for native types
if klass in ['int', 'float', 'str', 'bool',
"date", 'datetime', "object"]:
klass = eval(klass)
# for model types
else:
klass = eval('models.' + klass)
if klass in [int, float, str, bool]:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == date:
return self.__deserialize_date(data)
elif klass == datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None):
"""
        Makes the HTTP request (synchronous) and returns the deserialized data.
To make an async request, define a function for callback.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
        :param callback function: Callback function for asynchronous request.
            If this parameter is provided,
            the request will be made asynchronously.
        :return:
            If the callback parameter is provided,
            the request will be made asynchronously and
            the method will return the request thread.
            If the callback parameter is None,
            the method will return the response directly.
"""
if callback is None:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings, callback)
else:
thread = threading.Thread(target=self.__call_api,
args=(resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
callback))
thread.start()
return thread
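    # A hypothetical asynchronous invocation sketch; the resource path and
    # response type below are placeholders:
    #
    #     def on_done(result):
    #         print(result)
    #
    #     thread = client.call_api('/ping', 'GET', response_type='str',
    #                              callback=on_done)
    #     thread.join()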
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None):
"""
Makes the HTTP request using RESTClient.
"""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers)
else:
raise ValueError(
"http method must be `GET`, `HEAD`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def prepare_post_parameters(self, post_params=None, files=None):
"""
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = {}
if post_params:
params.update(post_params)
if files:
for k, v in iteritems(files):
if not v:
continue
with open(v, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = mimetypes.\
guess_type(filename)[0] or 'application/octet-stream'
params[k] = tuple([filename, filedata, mimetype])
return params
def select_header_accept(self, accepts):
"""
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = list(map(lambda x: x.lower(), accepts))
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = list(map(lambda x: x.lower(), content_types))
if 'application/json' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters dict to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
config = Configuration()
if not auth_settings:
return
for auth in auth_settings:
auth_setting = config.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys[auth_setting['key']] = auth_setting['value']
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
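    # Sketch of the auth-setting shape this method consumes from
    # Configuration().auth_settings() (the setting name and values are
    # illustrative assumptions):
    #
    #   {'api_key': {'in': 'header',         # or 'query'
    #                'key': 'X-API-KEY',     # parameter name to set
    #                'value': 'secret-token'}}
    #
    # With that setting, update_params_for_auth(headers, querys, ['api_key'])
    # adds {'X-API-KEY': 'secret-token'} to headers.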
def __deserialize_file(self, response):
"""
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
config = Configuration()
fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.\
search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "w") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""
Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, float, str, bool.
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = unicode(data)
except TypeError:
value = data
return value
def __deserialize_object(self, value):
"""
        Returns the original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""
Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a date object"
.format(string)
)
def __deserialize_datatime(self, string):
"""
Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a datetime object".
format(string)
)
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
instance = klass()
for attr, attr_type in iteritems(instance.swagger_types):
if data is not None \
and instance.attribute_map[attr] in data\
and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
if attr_type == 'Metadata':
# Small hack. Because the generated code does not handle
# the metadata object well
setattr(instance, attr, value)
else:
setattr(instance, attr, self.__deserialize(value, attr_type))
return instance
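    # Model contract assumed by __deserialize_model: every generated model
    # exposes swagger_types (attribute -> type name) and attribute_map
    # (attribute -> JSON key). For a hypothetical Pet model:
    #
    #   swagger_types = {'pet_id': 'int', 'name': 'str'}
    #   attribute_map = {'pet_id': 'id', 'name': 'name'}
    #
    # so {'id': 7, 'name': 'rex'} deserializes to a Pet with pet_id=7
    # and name='rex'.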
| mit | -4,780,376,884,226,708,000 | 33.915808 | 84 | 0.53954 | false |
rfaulkner/databayes | http/databayes_api/views.py | 1 | 13088 | """
Defines the routing endpoints of the RESTful API for databayes.
Each method corresponds to an API action and returns the status of the action and the output. This
layer handles communication to the databayes daemon.
IMPORTANT NOTE! - Only one of these server instances should be running to avoid race conditions
"""
from databayes_api import app, log, redisio, config, \
gen_queue_id, exists_queue_item
import json, time
from flask import render_template, redirect, url_for, \
request, escape, flash, g, session, Response
ERR_MSG_BADLY_FORMED_REQUEST = 'Malformed request, try again'
# UTILITY METHODS
def handle_queue_validation():
"""
Method for handling queue validation in the view logic
:return:
"""
qid = str(gen_queue_id())
iterations = 0
while exists_queue_item(qid):
if iterations == config.REDIS_QUEUE_COUNTER_MAX:
return -1 # Indicates failure
qid = str(gen_queue_id())
iterations += 1
return str(qid)
def unpack_query_params(request):
"""
    Helper method to fetch query parameters for command requests.
:param request:
:return:
"""
ret = dict()
ret['ok'] = True
ret['types'] = []
ret['fields'] = []
ret['fields1'] = []
ret['fields2'] = []
ret['values1'] = []
ret['values2'] = []
ret['message'] = ''
ret['fields'] = request.args.get('fields').split(',') \
if request.args.get('fields') else []
ret['types'] = request.args.get('types').split(',') \
        if request.args.get('types') else []
ret['fields1'] = request.args.get('fields1').split(',') \
if request.args.get('fields1') else []
ret['fields2'] = request.args.get('fields2').split(',') \
if request.args.get('fields2') else []
ret['values1'] = request.args.get('values1').split(',') \
if request.args.get('values1') else []
ret['values2'] = request.args.get('values2').split(',') \
if request.args.get('values2') else []
if len(ret['fields']) != len(ret['types']) or \
len(ret['fields1']) != len(ret['values1']) or \
len(ret['fields2']) != len(ret['values2']):
ret['ok'] = False
ret['message'] = 'Count of fields and types or values do not match'
return ret
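# Worked example of the dict unpack_query_params returns (values are
# illustrative): for a request such as
#   /add/rel/e1/e2?fields1=a,b&values1=1,2&fields2=c&values2=3
# it yields
#   {'ok': True, 'message': '',
#    'fields': [], 'types': [],
#    'fields1': ['a', 'b'], 'values1': ['1', '2'],
#    'fields2': ['c'], 'values2': ['3']}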
def wait_for_response(qid, poll_frequency=10.0, max_tries=5):
"""
Handles polling a response from the redis queue determined by id. Returns
an empty response if it never arrives.
:param qid: int redis queue id
:param poll_frequency: int millisecond frequency of a poll
:param max_tries: int poll no more times than this
:return: string response written to redis from the daemon
"""
rsp = ""
for i in xrange(max_tries):
rsp = redisio.DataIORedis().read(config.DBY_RSP_QUEUE_PREFIX + qid)
if rsp: # got response, stop polling
break
time.sleep(float(poll_frequency) / 1000.0)
return rsp
# --- VIEW METHODS ---
# ====================
def get_arg_str(fields, values, delimiter):
"""
Synthesizes argument strings for entity attributes for databayes. Length
of fields and values must be equal.
:param fields: list of field names
:param values: list of field values
    :param delimiter: str, relevant delimiter
:return: argument string
"""
items = []
for i in xrange(len(fields)):
items.append(str(fields[i]) + str(delimiter) + str(values[i]))
return ",".join(items)
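# For example, matching the implementation above:
#   get_arg_str(['name', 'age'], ['bob', '7'], '=')   -> 'name=bob,age=7'
#   get_arg_str(['name', 'age'], ['str', 'int'], '_') -> 'name_str,age_int'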
def view_switch(view, args):
"""
General method which implements view logic
:param view: str, view to construct a response for
:param args: view arguments passed along
:return: text response from databayes or error
"""
log.debug('Processing view: "{0}"'.format(view))
log.debug('Processing args: "{0}"'.format(str(args)))
query_param_obj = unpack_query_params(request)
if (not query_param_obj['ok']):
return Response(json.dumps([query_param_obj['message']]),
mimetype='application/json')
# Retrieve a valid queue item
qid = handle_queue_validation()
if qid == -1:
return Response(json.dumps(['Queue is full, try again later.']),
mimetype='application/json')
# Construct command
cmd = ""
if view == 'define_entity':
        if query_param_obj['fields'] and query_param_obj['types']:
            arg_str = get_arg_str(query_param_obj['fields'],
                query_param_obj['types'], '_')
        else:
            arg_str = ""
            log.info('Warning: entity has no attributes')
cmd = 'def {0}({1})'.format(args['entity'], arg_str) \
if arg_str else 'def ' + str(args['entity'])
elif view == 'add_relation':
arg_str_1 = get_arg_str(query_param_obj['fields1'], query_param_obj['values1'], '=')
arg_str_2 = get_arg_str(query_param_obj['fields2'], query_param_obj['values2'], '=')
cmd = 'add rel {0}({1}) {2}({3})'.format(args['entity_1'], arg_str_1,
args['entity_2'], arg_str_2)
elif view == 'generate':
pass
elif view == 'list_entity':
cmd = 'lst ent {0}'.format(args['pattern'])
elif view == 'list_relation':
arg_str_1 = get_arg_str(query_param_obj['fields1'], query_param_obj['values1'], '=')
arg_str_2 = get_arg_str(query_param_obj['fields2'], query_param_obj['values2'], '=')
cmd = 'lst rel {0}({1}) {2}({3})'.format(args['entity_1'], arg_str_1,
args['entity_2'], arg_str_2)
elif view == 'remove_entity':
cmd = 'rm ent {0}'.format(args['entity'])
elif view == 'remove_relation':
arg_str_1 = get_arg_str(query_param_obj['fields1'], query_param_obj['values1'], '=')
arg_str_2 = get_arg_str(query_param_obj['fields2'], query_param_obj['values2'], '=')
cmd = 'rm rel {0}({1}) {2}({3})'.format(args['entity_1'], arg_str_1,
args['entity_2'], arg_str_2)
log.info('sending command: "{0}"'.format(cmd))
# Send cmd to databayes daemon
redisio.DataIORedis().connect()
redisio.DataIORedis().write(config.DBY_CMD_QUEUE_PREFIX + qid, cmd)
# check response
rsp = wait_for_response(qid)
if not rsp:
rsp = "Could not find response before max retires expired."
return rsp
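# Illustrative command strings view_switch hands to the daemon (entity and
# attribute names are made up):
#   define_entity   -> 'def person(name_str,age_int)'
#   add_relation    -> 'add rel person(name=bob) city(name=sf)'
#   list_entity     -> 'lst ent pers*'
#   list_relation   -> 'lst rel person(name=bob) city(name=sf)'
#   remove_entity   -> 'rm ent person'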
def home():
"""
Defines web interface to the tool and help.
"""
# TODO - add content here, primarily an interface to an instance
# run on rackspace host
return Response("Welcome to databayes!",
mimetype='application/json')
def version():
"""
Basic version info for databayes
"""
return Response("databayes v1. 2015. Ryan Faulkner",
mimetype='application/json')
def define_entity(entity):
"""
Handles remote requests to databayes for entity definition
Translation: def e(<f1>_<t1>, <f2>_<t2>, ...) ->
/def/e?fields=f1,f2,...&types=t1,t2,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch('define_entity', {'entity': entity})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def add_relation(entity_1, entity_2):
"""
Handles remote requests to databayes for adding relations
Translation: add rel e1(<f1_1>_<v1_1>,...) e2(<f2_1>_<v2_1>,...) ->
/add/rel/e1/e2?fields1=f1_1,...&types1=t1_1,...
&fields2=f2_1,...&types2=t2_1,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch(
'add_relation', {'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def generate(entity_1, entity_2):
"""
Handles remote requests to databayes for generating samples
Translation: gen e1(<f1_1>_<v1_1>,...) constrain e2(<f2_1>_<v2_1>,...) ->
/gen/e1/e2?fields1=f1_1,...&types1=t1_1,...&fields2=f2_1,...&types2=t2_1,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps(
[view_switch('generate',
{'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def list_entity(pattern):
"""
Handles remote requests to databayes for listing entities
Translation: lst ent regex -> /lst/ent/regex
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch('list_entity', {'pattern': pattern})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def list_relation(entity_1, entity_2):
"""
Handles remote requests to databayes for listing relations
    Translation: lst rel regex1 regex2 -> /lst/rel/regex1/regex2
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps(
[view_switch('list_relation',
{'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def remove_entity(entity):
"""
Handles remote requests to databayes for removing entities
Translation: rm ent e -> /rm/ent/e
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch('remove_entity', {'entity': entity})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def remove_relation(entity_1, entity_2):
"""
Handles remote requests to databayes for removing relations
Translation: rm rel e1(<f1_1>_<v1_1>,...) e2(<f2_1>_<v2_1>,...)
-> /rm/rel/e1/e2?fields1=f1_1,...&values1=t1_1,...&fields2=f2_1,
...&values2=t2_1,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps(
[view_switch('remove_relation',
{'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
# Stores view references in structure
view_list = {
home.__name__: home,
version.__name__: version,
define_entity.__name__: define_entity,
add_relation.__name__: add_relation,
generate.__name__: generate,
list_entity.__name__: list_entity,
list_relation.__name__: list_relation,
remove_entity.__name__: remove_entity,
remove_relation.__name__: remove_relation,
}
route_deco = {
home.__name__: app.route('/', methods=['GET']),
version.__name__: app.route('/v', methods=['GET']),
define_entity.__name__: app.route('/def/<entity>', methods=['GET', 'POST']),
add_relation.__name__: app.route('/add/<entity_1>/<entity_2>', methods=['GET', 'POST']),
    generate.__name__: app.route('/gen/<entity_1>/<entity_2>', methods=['GET', 'POST']),
list_entity.__name__: app.route('/lst/ent/<pattern>', methods=['GET', 'POST']),
    list_relation.__name__: app.route('/lst/rel/<entity_1>/<entity_2>', methods=['GET', 'POST']),
remove_entity.__name__: app.route('/rm/ent/<entity>', methods=['GET', 'POST']),
remove_relation.__name__: app.route('/rm/rel/<entity_1>/<entity_2>', methods=['GET', 'POST']),
}
# Apply decorators to views
def init_views():
for key in route_deco:
log.info('Registering view - {0}'.format(key))
route = route_deco[key]
view_method = view_list[key]
view_list[key] = route(view_method)
| apache-2.0 | 8,312,816,424,362,879,000 | 34.468835 | 103 | 0.575718 | false |
adharaengine/AdharaDB | test.py | 1 | 9327 | import unittest
import tempfile
from tempfile import NamedTemporaryFile
from ZODB import DB, config
from ZODB.FileStorage import FileStorage
from db import Graph, Element, Edge, Node
from backends import DictionaryBackend, ZODBBTreeBackend
class TestGraph(unittest.TestCase):
def setUp(self):
self.g = Graph(DictionaryBackend())
def test_iter_(self):
elements = self.g.add_nodes(4)
elements.append(self.g.add_edge(elements[0], elements[1],{'keye':'valuee'}))
elements.append(self.g.add_edge(elements[0], elements[2],{'keye':'valuee'}))
self.assertEqual(len(elements),6)
for n in self.g:
self.assertIsInstance(n, Element)
self.assertIn(n, elements)
    def test_getter_setter_(self):
node = self.g.add_node({'keyn':'valuen'})
self.g[node] = {'keyn2':'valuen2'}
self.assertEqual(self.g[node], {'keyn':'valuen','keyn2':'valuen2'})
def test_add_node(self):
n = self.g.add_node()
self.assertIsInstance(n, Element)
for node in self.g.nodes:
self.assertIsInstance(node, Element)
self.assertIn(n, self.g.nodes)
def test_add_nodes(self):
nodes = self.g.add_nodes(4)
self.assertIsInstance(nodes,list)
self.assertEqual(len(nodes),4)
for n in nodes:
self.assertIsInstance(n, Element)
def test_nodes(self):
self.g.add_nodes(5)
for n in self.g.nodes:
self.assertIsInstance(n, Element)
def test_add_edge(self):
nodes = self.g.add_nodes(5)
for idx, val in enumerate(nodes):
try:
self.g.add_edge(val,nodes[idx+1])
except IndexError:
pass
for e in self.g.edges:
self.assertIsInstance(e, Element)
self.assertFalse(e.directed)
def test_add_directed_edge(self):
nodes = self.g.add_nodes(5)
for idx, val in enumerate(nodes):
try:
self.g.add_edge(val,nodes[idx+1],directed=True)
except IndexError:
pass
for e in self.g.edges:
self.assertIsInstance(e, Element)
self.assertTrue(e.directed)
def test_add_edges(self):
nodes = self.g.add_nodes(5)
edge_list = []
for idx, val in enumerate(nodes):
try:
edge_list.append((val,
nodes[idx+1],
{'test':1, 't':'test'}))
except IndexError:
pass
edges = self.g.add_edges(edge_list)
for e in edges:
self.assertIsInstance(e, Element)
def test_get_attributes(self):
node = self.g.add_node({'keyn':'valuen'})
node2 = self.g.add_node({'keyn2':'valuen2'})
edge = self.g.add_edge(node,node2,{'keye':'valuee'})
self.assertEqual(self.g[node],{'keyn':'valuen'})
self.assertEqual(self.g[node2],{'keyn2':'valuen2'})
self.assertEqual(self.g[edge],{'keye':'valuee'})
def test_add_attributes(self):
node = self.g.add_node({'keyn':'valuen'})
self.g[node] = {'keyn2':'valuen2'}
self.assertEqual(self.g[node], {'keyn':'valuen','keyn2':'valuen2'})
def test_del_node(self):
node = self.g.add_node({'keyn':'valuen'})
node2 = self.g.add_node({'keyn2':'valuen2'})
self.g.del_node(node2)
for n in self.g.nodes:
self.assertEqual(n,node)
for a in self.g.attribute_store:
self.assertEqual(a,node)
def test_del_edge(self):
node = self.g.add_node({'keyn':'valuen'})
node2 = self.g.add_node({'keyn2':'valuen2'})
node3 = self.g.add_node({'keyn3':'valuen3'})
edge = self.g.add_edge(node,node2,{'keye':'valuee'})
edge2 = self.g.add_edge(node,node3,{'keye':'valuee'})
edge3 = self.g.add_edge(node2,node3)
self.g.del_edge(edge2)
for e in self.g.edges:
self.assertIn(e,[edge,edge3])
        for a in self.g.attribute_store:
            self.assertIn(a,[node,node2,node3,edge,edge3])
def test_graph(self):
n1 = self.g.add_node()
n2 = self.g.add_node()
n3 = self.g.add_node()
e1 = self.g.add_edge(n1,n2)
e2 = self.g.add_edge(n1,n3)
self.assertIn(e1,self.g.edges)
self.assertIn(e2,self.g.edges)
self.assertIn(n2, n1.neighbors)
self.assertIn(n1, n2.neighbors)
self.assertIn(n3, n1.neighbors)
def test_add_weighted_node(self):
n = self.g.add_node(weight=7)
self.assertIsInstance(n, Element)
for node in self.g.nodes:
self.assertIsInstance(node, Element)
self.assertIn(n, self.g.nodes)
self.assertEqual(n.weight, 7)
class TestGraphZODB(TestGraph):
def setUp(self):
storage = FileStorage(NamedTemporaryFile().name)
db = DB(storage)
connection = db.open()
root = connection.root
self.g = Graph(backend=ZODBBTreeBackend(root))
class TestElement(unittest.TestCase):
def setUp(self):
self.g = Graph(DictionaryBackend())
def test_eq_ne_(self):
e = Element(self.g)
if e == e:
pass
else:
self.fail('Element object does not compare equal to itself!')
e2 = Element(self.g)
if e != e2:
pass
else:
self.fail('Two different Element objects compare equal!')
self.assertNotEqual(e, e2)
self.assertEqual(e, e)
def test_lt_gt_le_ge_(self):
e = Element(self.g)
e2 = Element(self.g)
        #we don't know whether e2 will be less or greater than e, so test is limited
if e > e2:
pass
if e >= e2:
pass
if e < e2:
pass
if e <= e2:
pass
def test_hash_(self):
e = Element(self.g)
e2 = Element(self.g)
self.assertTrue(e.__hash__() != e2.__hash__())
def test_repr_(self):
e = Element(self.g)
self.assertIsInstance(e.__repr__(), str)
def test_setattr_(self):
e = Element(self.g)
with self.assertRaises(TypeError):
e.id = 3
def test_str_(self):
e = Element(self.g)
self.assertIsInstance(str(e), str)
def test_weight(self):
e = Element(self.g)
e.weight = 9
self.assertEqual(e.weight, 9)
class TestElementZODB(TestElement):
def setUp(self):
storage = FileStorage(NamedTemporaryFile().name)
db = DB(storage)
connection = db.open()
root = connection.root
self.g = Graph(backend=ZODBBTreeBackend(root))
class TestNode(TestElement):
def test_delete(self):
n = self.g.add_node()
n.delete()
self.assertNotIn(n, self.g)
def test_neighbors(self):
nodes = self.g.add_nodes(3)
self.g.add_edge(nodes[0], nodes[1])
self.g.add_edge(nodes[0], nodes[2])
self.assertIn(nodes[1], nodes[0].neighbors)
self.assertIn(nodes[2], nodes[0].neighbors)
def test_edges(self):
nodes = self.g.add_nodes(3)
e1 = self.g.add_edge(nodes[0], nodes[1])
e2 = self.g.add_edge(nodes[0], nodes[2])
self.assertIn(e1, nodes[0].edges)
self.assertIn(e2, nodes[0].edges)
for e in nodes[0].edges:
self.assertIsInstance(e, Edge)
def test_iter_(self):
nodes = self.g.add_nodes(3)
e1 = self.g.add_edge(nodes[0], nodes[1])
e2 = self.g.add_edge(nodes[0], nodes[2])
self.assertIn(e1, nodes[0])
self.assertIn(e2, nodes[0])
self.assertIn(nodes[1], nodes[0])
self.assertIn(nodes[2], nodes[0])
def test_getitem_setitem_(self):
node = self.g.add_node({'keyn':'valuen'})
node['keyn2'] = 'valuen2'
self.assertEqual(node['keyn'],'valuen')
self.assertEqual(node['keyn2'],'valuen2')
class TestNodeZODB(TestNode):
def setUp(self):
storage = FileStorage(NamedTemporaryFile().name)
db = DB(storage)
connection = db.open()
root = connection.root
self.g = Graph(backend=ZODBBTreeBackend(root))
class TestEdge(TestElement):
def test_delete(self):
n1 = self.g.add_node()
n2 = self.g.add_node()
e = self.g.add_edge(n1, n2)
e.delete()
self.assertNotIn(e, self.g)
def test_nodes(self):
nodes = self.g.add_nodes(2)
e1 = self.g.add_edge(nodes[0], nodes[1])
for n in e1.nodes:
self.assertIsInstance(n, Node)
self.assertIn(n, nodes)
def test_iter_(self):
nodes = self.g.add_nodes(2)
e1 = self.g.add_edge(nodes[0], nodes[1])
for n in e1:
self.assertIsInstance(n, Node)
self.assertIn(n, nodes)
def test_getitem_setitem_(self):
nodes = self.g.add_nodes(2)
e1 = self.g.add_edge(nodes[0], nodes[1])
e1['key2'] = 'value2'
self.assertEqual(e1['key2'],'value2')
class TestEdgeZODB(TestEdge):
def setUp(self):
storage = FileStorage(NamedTemporaryFile().name)
db = DB(storage)
connection = db.open()
root = connection.root
self.g = Graph(backend=ZODBBTreeBackend(root))
| apache-2.0 | 717,199,211,216,425,300 | 29.184466 | 84 | 0.563525 | false |
brianjimenez/lightdock | lightdock/test/gso/test_coordinates.py | 1 | 6927 | """Tests for Coordinates class"""
from nose.tools import assert_almost_equals
from nose.tools import raises
import os
from lightdock.gso.coordinates import Coordinates
from lightdock.gso.coordinates import CoordinatesFileReader
from lightdock.error.lightdock_errors import GSOCoordinatesError
class TestCoordinates:
def setUp(self):
self.values_2D = [1.0, 2.0]
self.values_3D = [1.0, 2.0, 3.0]
def tearDown(self):
pass
def test_create_coordinates_2D(self):
coordinates = Coordinates(self.values_2D)
assert coordinates.dimension == 2
for i in range(coordinates.dimension):
assert_almost_equals(self.values_2D[i], coordinates[i])
def test_create_coordinates_3D(self):
coordinates = Coordinates(self.values_3D)
assert coordinates.dimension == 3
for i in range(coordinates.dimension):
assert_almost_equals(self.values_3D[i], coordinates[i])
def test_check_equal_coordinates_2D(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates(self.values_2D)
assert coordinates1 == coordinates2
assert not coordinates1 != coordinates2
def test_check_not_equal_coordinates_2D_and_3D(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates(self.values_3D)
assert not coordinates1 == coordinates2
assert coordinates1 != coordinates2
    def test_index_assignment(self):
coordinates = Coordinates(self.values_2D)
assert_almost_equals(self.values_2D[0], coordinates[0])
assert_almost_equals(self.values_2D[1], coordinates[1])
coordinates[0] = -1.0
assert_almost_equals(-1.0, coordinates[0])
assert_almost_equals(self.values_2D[1], coordinates[1])
def test_clone_coordinates(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = coordinates1.clone()
assert coordinates1 == coordinates2
coordinates2[0] = -1.0
assert coordinates1 != coordinates2
def test_coordinates_addition(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates(self.values_2D)
expected = Coordinates([2.0, 4.0])
assert expected == coordinates1 + coordinates2
def test_coordinates_subtraction(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates(self.values_2D)
expected = Coordinates([0.0, 0.0])
assert expected == coordinates1 - coordinates2
    def test_coordinates_addition_and_assignment(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates(self.values_2D)
expected = Coordinates([2.0, 4.0])
coordinates1 += coordinates2
assert expected == coordinates1
    def test_coordinates_subtraction_and_assignment(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates(self.values_2D)
expected = Coordinates([0.0, 0.0])
coordinates1 -= coordinates2
assert expected == coordinates1
def test_norm(self):
coordinates = Coordinates(self.values_2D)
assert_almost_equals(2.236067977, coordinates.norm())
def test_distance_same_coordinate(self):
coordinates = Coordinates(self.values_2D)
assert_almost_equals(0.0, coordinates.distance(coordinates))
def test_distance_different_coordinates(self):
coordinates1 = Coordinates([0., 0., 0.])
coordinates2 = Coordinates([20., 0., 21.])
assert_almost_equals(29.0, coordinates1.distance(coordinates2))
def test_distance2_same_coordinate(self):
coordinates = Coordinates(self.values_2D)
assert_almost_equals(0.0, coordinates.distance2(coordinates))
def test_sum_of_squares(self):
coordinates = Coordinates(self.values_2D)
assert_almost_equals(5.0, coordinates.sum_of_squares())
def test_distance2_different_coordinates(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates([2.0, 3.0])
assert_almost_equals(2.0, coordinates1.distance2(coordinates2))
def test_multiplication_and_assigment(self):
coordinates = Coordinates(self.values_3D)
expected = Coordinates([-3.0, -6.0, -9.0])
coordinates *= -3.0
assert expected == coordinates
def test_multiplication(self):
coordinates = Coordinates(self.values_3D)
expected = Coordinates([-3.0, -6.0, -9.0])
assert expected == (coordinates * -3.0)
def test_move_different_coordinates(self):
coordinates1 = Coordinates(self.values_2D)
coordinates2 = Coordinates([0.0, 1.0])
expected = Coordinates([-1.12132034356, -0.12132034356])
assert expected == coordinates1.move(coordinates2, 3.0)
def test_move_same_coordinate(self):
coordinates1 = Coordinates(self.values_2D)
assert coordinates1 == coordinates1.move(coordinates1)
class TestCoordinatesFileReader:
def setUp(self):
self.golden_data_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + '/golden_data/'
def tearDown(self):
pass
def test_read_coordinates_from_file(self):
reader = CoordinatesFileReader(2)
coordinates = reader.get_coordinates_from_file(self.golden_data_path + 'initial_positions.txt')
assert 50 == len(coordinates)
assert "(0.745916, -0.92056)" == str(coordinates[0])
assert "(-2.29363, -0.229427)" == str(coordinates[9])
assert "(0.617171, -2.85014)" == str(coordinates[-1])
@raises(GSOCoordinatesError)
def test_read_coordinates_from_file_with_errors(self):
reader = CoordinatesFileReader(2)
coordinates = reader.get_coordinates_from_file(self.golden_data_path + 'initial_positions_with_error.txt')
assert len(coordinates)
@raises(GSOCoordinatesError)
def test_read_coordinates_from_file_with_error_in_column(self):
reader = CoordinatesFileReader(2)
coordinates = reader.get_coordinates_from_file(self.golden_data_path + 'initial_positions_with_wrong_column.txt')
assert len(coordinates)
@raises(GSOCoordinatesError)
def test_read_coordinates_from_file_no_file(self):
reader = CoordinatesFileReader(2)
coordinates = reader.get_coordinates_from_file(self.golden_data_path + 'no_file.txt')
assert len(coordinates)
| gpl-3.0 | 703,583,084,849,713,400 | 33.123153 | 121 | 0.630143 | false |
jkyeung/XlsxWriter | xlsxwriter/test/table/test_table01.py | 1 | 1893 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
worksheet.add_table('C3:F13')
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Column1"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause | -5,498,686,703,020,115,000 | 32.210526 | 163 | 0.566297 | false |
purisc-group/purisc | compiler/class_def/conversions/arithmetic.py | 1 | 29184 | from helpers import next_subleq
from helpers import subleq
from helpers import clear
import re
def add(instr, assem):
a = instr.args[0];
b = instr.args[1];
c = instr.result;
t0 = assem.getNextTemp();
#check for literals
if re.match("\d+",a):
if a not in assem.dataMem:
assem.dataMem[a] = a;
if re.match("\d+",b):
if b not in assem.dataMem:
assem.dataMem[b] = b;
assem.progMem.append("\n// " + instr.raw);
assem.subleq(t0,t0,"NEXT");
assem.subleq(a,t0,"NEXT");
assem.subleq(b,t0,"NEXT");
assem.subleq(c,c,"NEXT");
assem.subleq(t0,c,"NEXT");
def sub(instr, assem):
a = instr.args[0];
b = instr.args[1];
    c = instr.result;
    t0 = assem.getNextTemp();
    #check for literals
    if re.match("\d+",a):
        if a not in assem.dataMem:
            assem.dataMem[a] = a;
    if re.match("\d+",b):
        if b not in assem.dataMem:
            assem.dataMem[b] = b;
    assem.progMem.append("\n// " + instr.raw);
assem.subleq(c,c,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(a,t0,"NEXT");
assem.subleq(t0,c,"NEXT");
assem.subleq(b,c,"NEXT");
def mul(instr, assem):
arg1 = instr.args[0];
arg2 = instr.args[1];
result = instr.result;
c = assem.getNextReserved("workingResult"); # will hold the value of the negative answer until it is flipped at the end if necessary
a = assem.getNextReserved("mul");
b = assem.getNextReserved("mul");
flip = assem.getNextReserved("flip");
i0 = assem.getNextReserved("i");
operand = assem.getNextReserved("operand");
power = assem.getNextReserved("power");
decomp = assem.getNextReserved("decomp");
decomp_ = assem.getNextReserved("mul_decomp_");
powers = assem.getNextReserved("powers");
p_ = "powersOf2_";
#labels
flipA = assem.getNextReserved("flipA");
checkB = assem.getNextReserved("checkB");
flipB = assem.getNextReserved("flipB");
continue0 = assem.getNextReserved("continue0_");
continue1 = assem.getNextReserved("continue1_");
aLess = assem.getNextReserved("aLess");
continue2 = assem.getNextReserved("continue2_");
begin = assem.getNextReserved("begin");
p_0 = assem.getNextReserved("p_0_");
d_0 = assem.getNextReserved("d_0_");
p_1 = assem.getNextReserved("p_1_");
less = assem.getNextReserved("less");
test = assem.getNextReserved("test");
restore = assem.getNextReserved("restore");
continue3 = assem.getNextReserved("continue3_");
begin2 = assem.getNextReserved("begin2_");
d_2 = assem.getNextReserved("d_2_");
d_3 = assem.getNextReserved("d_3_");
d_4 = assem.getNextReserved("d_4_");
add = assem.getNextReserved("add");
regardless = assem.getNextReserved("regardless");
flipSign = assem.getNextReserved("flipSign");
finish = assem.getNextReserved("finish");
noflipA = assem.getNextReserved("noFlipA");
noflipB = assem.getNextReserved("noFlipB");
t0 = assem.getNextTemp();
t1 = assem.getNextTemp();
t3 = assem.getNextTemp();
t4 = assem.getNextTemp();
assem.progMem.append("\n// " + instr.raw);
#determine the sign of the result
assem.subleq(a,a,"NEXT"); #check the sign of A
assem.subleq(b,b,"NEXT");
assem.subleq(flip,flip,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(arg1,t0,noflipA);
assem.subleq(t0,a,"NEXT");
assem.subleq(1,flip,checkB);
assem.subleq(noflipA + ":" + arg1,a,"NEXT");
assem.subleq(checkB + ":" + t0,t0,"NEXT"); #check the sign of B
assem.subleq(arg2,t0,noflipB);
assem.subleq(t0,b,"NEXT");
assem.subleq(-1,flip,"NEXT");
assem.subleq(t0,t0,continue0);
assem.subleq(noflipB + ":" + arg2,b,"NEXT");
#determine the operand
assem.subleq(continue0 + ":" + operand,operand,"NEXT");
assem.subleq(power,power,"NEXT");
assem.subleq(a,b,aLess);
assem.subleq(a,power,"NEXT");
assem.subleq(b,operand,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(power,t0,"NEXT");
assem.subleq(t0,operand,"NEXT");
assem.subleq(t0,t0,continue1);
assem.subleq(aLess + ":" + a,operand,"NEXT");
assem.subleq(b,power,"NEXT");
assem.subleq(t0,t0,'NEXT');
assem.subleq(operand,t0,"NEXT");
assem.subleq(t0,power,"NEXT");
    #decompose the operand into powers of 2:
    #  maxPower = -1
    #  for i = 30 -> 0:
    #      if operand - 2^i >= 0:
    #          powers[i] = 1
    #          operand = operand - 2^i
    #          if maxPower == -1:
    #              maxPower = i
    #          if operand == 0:
    #              break
two_i = assem.getNextReserved("two_i");
decomp_i = assem.getNextReserved("decomp_i");
restore = assem.getNextReserved("restore");
maxPower = assem.getNextReserved("maxPower");
maxFlag = assem.getNextReserved("maxFlag");
notMax = assem.getNextReserved("notMax");
continue2 = assem.getNextReserved("continue2");
incr0 = assem.getNextReserved("inc");
loop0 = assem.getNextReserved("loop");
t4 = assem.getNextTemp();
assem.dataMem[-2] = -2;
assem.dataMem[0] = 0;
#setup loop
assem.subleq(continue1 + ":" + i0,i0,"NEXT");
assem.subleq(-30,i0,"NEXT");
assem.subleq(two_i,two_i,"NEXT");
assem.subleq("powersOf2_",two_i,"NEXT");
assem.subleq(30,two_i,"NEXT");
assem.subleq(decomp_i,decomp_i,"NEXT");
assem.subleq("mul_decomp_",decomp_i,"NEXT");
assem.subleq(30,decomp_i,"NEXT");
assem.subleq(maxPower,maxPower,"NEXT");
assem.subleq(maxFlag,maxFlag,"NEXT");
assem.subleq(-2,maxFlag, "NEXT");
assem.subleq(loop0 + ":" + p_0,p_0,"NEXT");
assem.subleq(two_i,p_0,"NEXT");
assem.subleq(d_0,d_0,"NEXT");
assem.subleq(decomp_i,d_0,"NEXT");
assem.subleq(p_1,p_1,"NEXT");
assem.subleq(two_i,p_1,"NEXT");
assem.subleq(p_0 + ":#1",operand,"NEXT"); #operand = operand - 2^i
assem.subleq(-1,operand,restore); #add one to handle zero case
assem.subleq(1,operand,"NEXT");
assem.subleq(-1,d_0 + ":#1","NEXT"); #subtract the one
assem.subleq(1,maxFlag,notMax);
assem.subleq(i0,maxPower,"NEXT");
assem.subleq(notMax + ":0",operand,continue2);
assem.subleq(t0,t0,incr0);
assem.subleq(restore + ":" + t0,t0,"NEXT");
assem.subleq(p_1 + ":#1",t0,"NEXT");
assem.subleq(t0,operand,"NEXT");
assem.subleq(1,operand,"NEXT");
#decrement and repeat if necessary
assem.subleq(incr0 + ":-1",decomp_i,"NEXT");
assem.subleq(-1,two_i,"NEXT");
assem.subleq(1,i0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(i0,t0,loop0);
#do successive additions of powers of 2
i1 = assem.getNextReserved("i");
adder = assem.getNextReserved("adder");
op = assem.getNextReserved("op");
loop2 = assem.getNextReserved("loop");
continue3 = assem.getNextReserved("continue3");
continueLoop = assem.getNextReserved("contLoop");
d_3 = assem.getNextReserved("d_3");
noADD = assem.getNextReserved("noAdd");
assem.subleq(continue2 + ":" + i1,i1,"NEXT");
assem.subleq("2938483",t0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(maxPower,t0,"NEXT")
assem.subleq(t1,t1,"NEXT");
assem.subleq(t0,t1,"NEXT");
assem.subleq(maxPower,maxPower,"NEXT");
assem.subleq(t1,maxPower,"NEXT");
assem.subleq(adder,adder,"NEXT");
assem.subleq(op,op,"NEXT");
assem.subleq(power,op,"NEXT");
assem.subleq(op,adder,'NEXT');
assem.subleq(decomp_i,decomp_i,"NEXT");
assem.subleq("mul_decomp_",decomp_i,"NEXT");
assem.subleq(c,c,"NEXT");
assem.subleq(loop2 + ":" + maxPower,i1,continueLoop); #for i = 0 -> maxPower
assem.subleq(t0,t0,continue3);
assem.subleq(continueLoop + ":" + t0,t0,"NEXT");
assem.subleq(d_3,d_3,"NEXT");
assem.subleq(decomp_i,d_3,"NEXT");
assem.subleq(maxPower,t0,"NEXT"); #restore i to what it was before comparison
assem.subleq(t0,i1,"NEXT");
assem.subleq(0,d_3 + ":#1",noADD);
assem.subleq(adder,c,"NEXT");
assem.subleq(noADD + ":" + t0,t0,"NEXT");
assem.subleq(adder,t0,"NEXT");
assem.subleq(t0,adder,"NEXT");
#increment stuff
assem.subleq(-1,i1,"NEXT");
assem.subleq(1,decomp_i,"NEXT");
assem.subleq(t0,t0,loop2);
assem.subleq(continue3 + ":" + t0,t0,"NEXT");
#determine sign. c is the negative right now so flip if flip flag == 0
done = assem.getNextReserved("done");
ansPos = assem.getNextReserved("ansPos");
ansNeg = assem.getNextReserved("ansNeg");
'''assem.subleq(result,result,"NEXT");
assem.subleq(flip,result,"NEXT");
assem.subleq(t0,t0,"#-1");'''
assem.subleq(-1,flip,ansNeg);
assem.subleq(1,flip,ansPos);
assem.subleq(t0,t0,ansNeg);
assem.subleq(ansPos + ":" + result,result,"NEXT");
assem.subleq(c,result,"NEXT");
assem.subleq(t0,t0,done);
assem.subleq(ansNeg + ":" + t0,t0,"NEXT");
assem.subleq(c,t0,"NEXT");
assem.subleq(t0,result,"NEXT");
assem.subleq(done + ":" + t0,t0,"NEXT");
assem.dataMem["1"] = "#1";
assem.dataMem["-30"] = "#-30";
assem.dataMem["0"] = "#0";
assem.dataMem["30"] = "#30";
assem.dataMem["-1"] = "#-1";
assem.dataMem["2"] = "#2";
assem.dataMem["2938483"] = "#2938483";
#space for the powers of 2
assem.dataMem["powersOf2_1"] = "#1"
assem.dataMem["powersOf2_2"] = "#2"
assem.dataMem["powersOf2_4"] = "#4"
assem.dataMem["powersOf2_8"] = "#8"
assem.dataMem["powersOf2_16"] = "#16"
assem.dataMem["powersOf2_32"] = "#32"
assem.dataMem["powersOf2_64"] = "#64"
assem.dataMem["powersOf2_128"] = "#128"
assem.dataMem["powersOf2_256"] = "#256"
assem.dataMem["powersOf2_512"] = "#512"
assem.dataMem["powersOf2_1024"] = "#1024"
assem.dataMem["powersOf2_2048"] = "#2048"
assem.dataMem["powersOf2_4096"] = "#4096"
assem.dataMem["powersOf2_8192"] = "#8192"
assem.dataMem["powersOf2_16384"] = "#16384"
assem.dataMem["powersOf2_32768"] = "#32768"
assem.dataMem["powersOf2_65536"] = "#65536"
assem.dataMem["powersOf2_131072"] = "#131072"
assem.dataMem["powersOf2_262144"] = "#262144"
assem.dataMem["powersOf2_524288"] = "#524288"
assem.dataMem["powersOf2_1048576"] = "#1048576"
assem.dataMem["powersOf2_2097152"] = "#2097152"
assem.dataMem["powersOf2_4194304"] = "#4194304"
assem.dataMem["powersOf2_8388608"] = "#8388608"
assem.dataMem["powersOf2_16777216"] = "#16777216"
assem.dataMem["powersOf2_33554432"] = "#33554432"
assem.dataMem["powersOf2_67108864"] = "#67108864"
assem.dataMem["powersOf2_134217728"] = "#134217728"
assem.dataMem["powersOf2_268435456"] = "#268435456"
assem.dataMem["powersOf2_536870912"] = "#536870912"
assem.dataMem["powersOf2_1073741824"] = "#1073741824"
assem.dataMem["powersOf2_"] = "&powersOf2_1"
#space for the decomposition, will be reused every multiplication
assem.dataMem["mul_decomp_0"] = "#0"
assem.dataMem["mul_decomp_1"] = "#0"
assem.dataMem["mul_decomp_2"] = "#0"
assem.dataMem["mul_decomp_3"] = "#0"
assem.dataMem["mul_decomp_4"] = "#0"
assem.dataMem["mul_decomp_5"] = "#0"
assem.dataMem["mul_decomp_6"] = "#0"
assem.dataMem["mul_decomp_7"] = "#0"
assem.dataMem["mul_decomp_8"] = "#0"
assem.dataMem["mul_decomp_9"] = "#0"
assem.dataMem["mul_decomp_10"] = "#0"
assem.dataMem["mul_decomp_11"] = "#0"
assem.dataMem["mul_decomp_12"] = "#0"
assem.dataMem["mul_decomp_13"] = "#0"
assem.dataMem["mul_decomp_14"] = "#0"
assem.dataMem["mul_decomp_15"] = "#0"
assem.dataMem["mul_decomp_16"] = "#0"
assem.dataMem["mul_decomp_17"] = "#0"
assem.dataMem["mul_decomp_18"] = "#0"
assem.dataMem["mul_decomp_19"] = "#0"
assem.dataMem["mul_decomp_20"] = "#0"
assem.dataMem["mul_decomp_21"] = "#0"
assem.dataMem["mul_decomp_22"] = "#0"
assem.dataMem["mul_decomp_23"] = "#0"
assem.dataMem["mul_decomp_24"] = "#0"
assem.dataMem["mul_decomp_25"] = "#0"
assem.dataMem["mul_decomp_26"] = "#0"
assem.dataMem["mul_decomp_27"] = "#0"
assem.dataMem["mul_decomp_28"] = "#0"
assem.dataMem["mul_decomp_29"] = "#0"
assem.dataMem["mul_decomp_30"] = "#0"
assem.dataMem["mul_decomp_"] = "&mul_decomp_0"
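# Pure-Python reference sketch of the shift-and-add scheme the SUBLEQ
# emission in mul() encodes (sign handling, binary decomposition of the
# smaller operand, successive additions of power-of-two multiples).
# Illustrative only; it is not emitted into program memory.
def mul_reference(a, b):
    flip = (a < 0) != (b < 0)           # result is negative iff signs differ
    a, b = abs(a), abs(b)
    operand, power = (a, b) if a < b else (b, a)
    result = 0
    for i in range(30, -1, -1):
        if operand >= (1 << i):         # bit i is set in the operand
            operand -= (1 << i)
            result += power << i        # add power * 2^i
    return -result if flip else result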
def div(instr, assem):
arg1 = instr.args[0];
arg2 = instr.args[1];
c = instr.result;
a = assem.getNextReserved("A");
b = assem.getNextReserved("B");
num = assem.getNextReserved("num");
denom = assem.getNextReserved("denom");
t0 = assem.getNextTemp();
t1 = assem.getNextTemp();
flip = assem.getNextReserved("flip");
noflipA = assem.getNextReserved("noflipA");
noflipB = assem.getNextReserved("noflipB");
checkB = assem.getNextReserved("checkB");
continue0 = assem.getNextReserved("continue");
continue1 = assem.getNextReserved("continue");
zero = assem.getNextReserved("zero");
done = assem.getNextReserved("done");
i0 = assem.getNextReserved("i");
loop0 = assem.getNextReserved("loop");
d_0 = assem.getNextReserved("d_0");
d_1 = assem.getNextReserved("d_1");
d_2 = assem.getNextReserved("d_2");
d_3 = assem.getNextReserved("d_3");
d_prev_0 = assem.getNextReserved("d_prev_0");
d_prev_1 = assem.getNextReserved("d_prev_1");
d_prev_2 = assem.getNextReserved("d_prev_2");
assem.progMem.append("\n// " + instr.raw);
#check for signs
assem.subleq(a,a,"NEXT"); #check the sign of A
assem.subleq(b,b,"NEXT");
assem.subleq(flip,flip,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(arg1,t0,noflipA);
assem.subleq(arg1,a,"NEXT");
assem.subleq(1,flip,checkB);
assem.subleq(noflipA + ":" + t0,a,"NEXT");
assem.subleq(checkB + ":" + t0,t0,"NEXT"); #check the sign of B
assem.subleq(arg2,t0,noflipB);
assem.subleq(t0,b,"NEXT");
assem.subleq(-1,flip,"NEXT");
assem.subleq(t0,t0,continue1);
assem.subleq(noflipB + ":" + arg2,b,"NEXT");
#compute d*2^i
assem.subleq(continue1 + ":" + b,"div_d_pwrs_0","NEXT");
assem.subleq(i0,i0,"NEXT");
assem.subleq(-1,i0,"NEXT");
#for i = 1 -> 30
assem.subleq(loop0 + ":" + t0,t0,"NEXT");
assem.subleq(t1,t1,"NEXT");
assem.subleq("div_d_pwrs_",t1,"NEXT"); #dereference d[i]
assem.subleq(i0,t1,"NEXT");
assem.subleq(d_0,d_0,"NEXT"); #change the appropriate instructions pointing to d[i]
assem.subleq(t1,d_0,"NEXT");
assem.subleq(d_1,d_1,"NEXT");
assem.subleq(t1,d_1,"NEXT");
assem.subleq(d_2,d_2,"NEXT");
assem.subleq(t1,d_2,"NEXT");
assem.subleq(d_3,d_3,"NEXT");
assem.subleq(t1,d_3,"NEXT");
assem.subleq(-1,t1,"NEXT"); #dereference d[i-1]
assem.subleq(d_prev_0,d_prev_0,"NEXT"); #rewrite the appropriate instructions pointing to d[i-1]
assem.subleq(t1,d_prev_0,"NEXT");
assem.subleq(d_prev_0 + ":#1",t0,"NEXT");
assem.subleq(d_0 + ":#1",d_1 + ":#1", "NEXT");
assem.subleq(t0,d_2 + ":#1","NEXT");
assem.subleq(t0,d_3 + ":#1","NEXT");
assem.subleq(-1,i0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(i0,t0,"NEXT");
assem.subleq(t1,t1,"NEXT");
assem.subleq(t0,t1,"NEXT");
assem.subleq(30,t1,loop0);
    #for i = 30 -> 0:
    #    if n - d*2^i >= 0:
    #        n = n - d*2^i
    #        result += 2^i
    #        if n == 0:
    #            break
loop1 = assem.getNextReserved("loop");
n = assem.getNextReserved("n");
i1 = assem.getNextReserved("i");
inc = assem.getNextReserved("inc");
restore = assem.getNextReserved("restore");
break0 = assem.getNextReserved("break0");
continue2 = assem.getNextReserved("continue2");
d_i = "d_i"; #pointer to d*2^i
two_i = "two_i"; #pointer to 2^i
d_0 = assem.getNextReserved("d_0");
d_1 = assem.getNextReserved("d_1");
p_0 = assem.getNextReserved("p_0");
assem.subleq(c,c,"NEXT");
    assem.subleq(n,n,"NEXT"); #set up loop
assem.subleq(t0,t0,"NEXT");
assem.subleq(a,t0,"NEXT");
assem.subleq(t0,n,"NEXT")
assem.subleq(i1,i1,"NEXT");
assem.subleq(-30,i1,"NEXT");
assem.subleq(loop1 + ":" + d_0,d_0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(d_i,t0,"NEXT");
assem.subleq(t0,d_0,"NEXT");
assem.subleq(d_1,d_1,"NEXT");
assem.subleq(t0,d_1,"NEXT");
assem.subleq(p_0,p_0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(two_i,t0,"NEXT");
assem.subleq(t0,p_0,"NEXT");
assem.subleq(d_0 + ":#1",n,"NEXT");
assem.subleq(-1,n,restore);
assem.subleq(t1,t1,"NEXT");
assem.subleq(p_0 + ":#1",t1,"NEXT");
assem.subleq(t1,c,"NEXT");
assem.subleq(1,n,break0); #restore n to n = n -d*2^i and also break if necessary
assem.subleq(t0,t0,inc);
assem.subleq(break0 + ":" + t0,t0,continue2);
assem.subleq(restore + ":" + t0,t0,"NEXT");
assem.subleq(d_1 + ":#1",t0,"NEXT");
assem.subleq(t0,n,"NEXT");
assem.subleq(1,n,"NEXT");
assem.subleq(inc + ":1",i1,"NEXT"); #decrement and check
assem.subleq(1,d_i,"NEXT");
assem.subleq(1,two_i,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(i1,t0,loop1);
#assem.subleq(continue2 + ":" + t0,t0,"NEXT");
    #flip the result sign if necessary
flipResult = assem.getNextReserved("flipResult");
assem.subleq(continue2 +":-1" ,flip,flipResult);
assem.subleq(1,flip,done);
assem.subleq(flipResult + ":" + t0,t0,"NEXT");
assem.subleq(c,t0,"NEXT");
assem.subleq(c,c,"NEXT");
assem.subleq(t1,t1,"NEXT");
assem.subleq(t0,t1,"NEXT");
assem.subleq(t1,c,"NEXT");
#done
assem.subleq(done + ":" + t0,t0,"NEXT");
assem.dataMem[-1] = -1;
assem.dataMem[1] = 1;
assem.dataMem[30] = 30;
assem.dataMem[-30] = -30;
assem.dataMem["div_d_pwrs_0"] = "#0"
assem.dataMem["div_d_pwrs_1"] = "#0"
assem.dataMem["div_d_pwrs_2"] = "#0"
assem.dataMem["div_d_pwrs_3"] = "#0"
assem.dataMem["div_d_pwrs_4"] = "#0"
assem.dataMem["div_d_pwrs_5"] = "#0"
assem.dataMem["div_d_pwrs_6"] = "#0"
assem.dataMem["div_d_pwrs_7"] = "#0"
assem.dataMem["div_d_pwrs_8"] = "#0"
assem.dataMem["div_d_pwrs_9"] = "#0"
assem.dataMem["div_d_pwrs_10"] = "#0"
assem.dataMem["div_d_pwrs_11"] = "#0"
assem.dataMem["div_d_pwrs_12"] = "#0"
assem.dataMem["div_d_pwrs_13"] = "#0"
assem.dataMem["div_d_pwrs_14"] = "#0"
assem.dataMem["div_d_pwrs_15"] = "#0"
assem.dataMem["div_d_pwrs_16"] = "#0"
assem.dataMem["div_d_pwrs_17"] = "#0"
assem.dataMem["div_d_pwrs_18"] = "#0"
assem.dataMem["div_d_pwrs_19"] = "#0"
assem.dataMem["div_d_pwrs_20"] = "#0"
assem.dataMem["div_d_pwrs_21"] = "#0"
assem.dataMem["div_d_pwrs_22"] = "#0"
assem.dataMem["div_d_pwrs_23"] = "#0"
assem.dataMem["div_d_pwrs_24"] = "#0"
assem.dataMem["div_d_pwrs_25"] = "#0"
assem.dataMem["div_d_pwrs_26"] = "#0"
assem.dataMem["div_d_pwrs_27"] = "#0"
assem.dataMem["div_d_pwrs_28"] = "#0"
assem.dataMem["div_d_pwrs_29"] = "#0"
assem.dataMem["div_d_pwrs_30"] = "#0"
assem.dataMem["div_d_pwrs_"] = "&div_d_pwrs_0"
assem.dataMem["powersOf2_1"] = "#1"
assem.dataMem["powersOf2_2"] = "#2"
assem.dataMem["powersOf2_4"] = "#4"
assem.dataMem["powersOf2_8"] = "#8"
assem.dataMem["powersOf2_16"] = "#16"
assem.dataMem["powersOf2_32"] = "#32"
assem.dataMem["powersOf2_64"] = "#64"
assem.dataMem["powersOf2_128"] = "#128"
assem.dataMem["powersOf2_256"] = "#256"
assem.dataMem["powersOf2_512"] = "#512"
assem.dataMem["powersOf2_1024"] = "#1024"
assem.dataMem["powersOf2_2048"] = "#2048"
assem.dataMem["powersOf2_4096"] = "#4096"
assem.dataMem["powersOf2_8192"] = "#8192"
assem.dataMem["powersOf2_16384"] = "#16384"
assem.dataMem["powersOf2_32768"] = "#32768"
assem.dataMem["powersOf2_65536"] = "#65536"
assem.dataMem["powersOf2_131072"] = "#131072"
assem.dataMem["powersOf2_262144"] = "#262144"
assem.dataMem["powersOf2_524288"] = "#524288"
assem.dataMem["powersOf2_1048576"] = "#1048576"
assem.dataMem["powersOf2_2097152"] = "#2097152"
assem.dataMem["powersOf2_4194304"] = "#4194304"
assem.dataMem["powersOf2_8388608"] = "#8388608"
assem.dataMem["powersOf2_16777216"] = "#16777216"
assem.dataMem["powersOf2_33554432"] = "#33554432"
assem.dataMem["powersOf2_67108864"] = "#67108864"
assem.dataMem["powersOf2_134217728"] = "#134217728"
assem.dataMem["powersOf2_268435456"] = "#268435456"
assem.dataMem["powersOf2_536870912"] = "#536870912"
assem.dataMem["powersOf2_1073741824"] = "#1073741824"
assem.dataMem["powersOf2_"] = "&powersOf2_1"
assem.dataMem["d_i"] = "&div_d_pwrs_30";
assem.dataMem["two_i"] = "&powersOf2_1073741824";
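# Pure-Python reference sketch of the binary long division the SUBLEQ
# emission in div() encodes (truncated quotient, result sign from the
# operand signs). Illustrative only; it is not emitted into program memory.
def div_reference(a, b):
    flip = (a < 0) != (b < 0)
    n, d = abs(a), abs(b)
    quotient = 0
    for i in range(30, -1, -1):
        if n >= (d << i):               # subtract d*2^i when it fits
            n -= (d << i)
            quotient += (1 << i)
        if n == 0:
            break
    return -quotient if flip else quotient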
def mod(instr, assem):
arg1 = instr.args[0];
arg2 = instr.args[1];
c = instr.result;
a = assem.getNextReserved("A");
b = assem.getNextReserved("B");
num = assem.getNextReserved("num");
denom = assem.getNextReserved("denom");
t0 = assem.getNextTemp();
t1 = assem.getNextTemp();
flip = assem.getNextReserved("flip");
noflipA = assem.getNextReserved("noflipA");
noflipB = assem.getNextReserved("noflipB");
checkB = assem.getNextReserved("checkB");
continue0 = assem.getNextReserved("continue");
continue1 = assem.getNextReserved("continue");
zero = assem.getNextReserved("zero");
done = assem.getNextReserved("done");
i0 = assem.getNextReserved("i");
loop0 = assem.getNextReserved("loop");
d_0 = assem.getNextReserved("d_0");
d_1 = assem.getNextReserved("d_1");
d_2 = assem.getNextReserved("d_2");
d_3 = assem.getNextReserved("d_3");
d_prev_0 = assem.getNextReserved("d_prev_0");
d_prev_1 = assem.getNextReserved("d_prev_1");
d_prev_2 = assem.getNextReserved("d_prev_2");
assem.progMem.append("\n// " + instr.raw);
#check for signs
assem.subleq(a,a,"NEXT"); #check the sign of A
assem.subleq(b,b,"NEXT");
assem.subleq(flip,flip,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(arg1,t0,noflipA);
assem.subleq(arg1,a,"NEXT");
assem.subleq(1,flip,checkB);
assem.subleq(noflipA + ":" + t0,a,"NEXT");
assem.subleq(checkB + ":" + t0,t0,"NEXT"); #check the sign of B
assem.subleq(arg2,t0,noflipB);
assem.subleq(t0,b,"NEXT");
assem.subleq(-1,flip,"NEXT");
assem.subleq(t0,t0,continue1);
assem.subleq(noflipB + ":" + arg2,b,"NEXT");
#compute d*2^i
assem.subleq(continue1 + ":" + b,"div_d_pwrs_0","NEXT");
assem.subleq(i0,i0,"NEXT");
assem.subleq(-1,i0,"NEXT");
#for i = 1 -> 30
assem.subleq(loop0 + ":" + t0,t0,"NEXT");
assem.subleq(t1,t1,"NEXT");
assem.subleq("div_d_pwrs_",t1,"NEXT"); #dereference d[i]
assem.subleq(i0,t1,"NEXT");
assem.subleq(d_0,d_0,"NEXT"); #change the appropriate instructions pointing to d[i]
assem.subleq(t1,d_0,"NEXT");
assem.subleq(d_1,d_1,"NEXT");
assem.subleq(t1,d_1,"NEXT");
assem.subleq(d_2,d_2,"NEXT");
assem.subleq(t1,d_2,"NEXT");
assem.subleq(d_3,d_3,"NEXT");
assem.subleq(t1,d_3,"NEXT");
assem.subleq(-1,t1,"NEXT"); #dereference d[i-1]
assem.subleq(d_prev_0,d_prev_0,"NEXT"); #rewrite the appropriate instructions pointing to d[i-1]
assem.subleq(t1,d_prev_0,"NEXT");
assem.subleq(d_prev_0 + ":#1",t0,"NEXT");
assem.subleq(d_0 + ":#1",d_1 + ":#1", "NEXT");
assem.subleq(t0,d_2 + ":#1","NEXT");
assem.subleq(t0,d_3 + ":#1","NEXT");
assem.subleq(-1,i0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(i0,t0,"NEXT");
assem.subleq(t1,t1,"NEXT");
assem.subleq(t0,t1,"NEXT");
assem.subleq(30,t1,loop0);
    #for i = 30 -> 0:
    #    if n - d*2^i >= 0:
    #        n = n - d*2^i
    #        result += 2^i
    #        if n == 0:
    #            break
loop1 = assem.getNextReserved("loop");
n = assem.getNextReserved("n");
i1 = assem.getNextReserved("i");
inc = assem.getNextReserved("inc");
restore = assem.getNextReserved("restore");
break0 = assem.getNextReserved("break0");
continue2 = assem.getNextReserved("continue2");
d_i = "d_i"; #pointer to d*2^i
two_i = "two_i"; #pointer to 2^i
d_0 = assem.getNextReserved("d_0");
d_1 = assem.getNextReserved("d_1");
p_0 = assem.getNextReserved("p_0");
assem.subleq(c,c,"NEXT");
    assem.subleq(n,n,"NEXT"); #set up loop
assem.subleq(t0,t0,"NEXT");
assem.subleq(a,t0,"NEXT");
assem.subleq(t0,n,"NEXT")
assem.subleq(i1,i1,"NEXT");
assem.subleq(-30,i1,"NEXT");
assem.subleq(loop1 + ":" + d_0,d_0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(d_i,t0,"NEXT");
assem.subleq(t0,d_0,"NEXT");
assem.subleq(d_1,d_1,"NEXT");
assem.subleq(t0,d_1,"NEXT");
assem.subleq(p_0,p_0,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(two_i,t0,"NEXT");
assem.subleq(t0,p_0,"NEXT");
assem.subleq(d_0 + ":#1",n,"NEXT");
assem.subleq(-1,n,restore);
assem.subleq(t1,t1,"NEXT");
assem.subleq(p_0 + ":#1",t1,"NEXT");
assem.subleq(t1,c,"NEXT");
assem.subleq(1,n,break0); #restore n to n = n -d*2^i and also break if necessary
assem.subleq(t0,t0,inc);
assem.subleq(break0 + ":" + t0,t0,continue2);
assem.subleq(restore + ":" + t0,t0,"NEXT");
assem.subleq(d_1 + ":#1",t0,"NEXT");
assem.subleq(t0,n,"NEXT");
assem.subleq(1,n,"NEXT");
assem.subleq(inc + ":1",i1,"NEXT"); #decrement and check
assem.subleq(1,d_i,"NEXT");
assem.subleq(1,two_i,"NEXT");
assem.subleq(t0,t0,"NEXT");
assem.subleq(i1,t0,loop1);
#assem.subleq(continue2 + ":" + t0,t0,"NEXT");
    #flip the result sign if necessary
flipResult = assem.getNextReserved("flipResult");
assem.subleq(continue2 +":-1" ,flip,flipResult);
assem.subleq(1,flip,done);
assem.subleq(flipResult + ":" + t0,t0,"NEXT");
assem.subleq(c,t0,"NEXT");
assem.subleq(c,c,"NEXT");
assem.subleq(t1,t1,"NEXT");
assem.subleq(t0,t1,"NEXT");
assem.subleq(t1,c,"NEXT");
#done
assem.subleq(done + ":" + t0,t0,"NEXT");
assem.dataMem[-1] = -1;
assem.dataMem[1] = 1;
assem.dataMem[30] = 30;
assem.dataMem[-30] = -30;
assem.dataMem["div_d_pwrs_0"] = "#0"
assem.dataMem["div_d_pwrs_1"] = "#0"
assem.dataMem["div_d_pwrs_2"] = "#0"
assem.dataMem["div_d_pwrs_3"] = "#0"
assem.dataMem["div_d_pwrs_4"] = "#0"
assem.dataMem["div_d_pwrs_5"] = "#0"
assem.dataMem["div_d_pwrs_6"] = "#0"
assem.dataMem["div_d_pwrs_7"] = "#0"
assem.dataMem["div_d_pwrs_8"] = "#0"
assem.dataMem["div_d_pwrs_9"] = "#0"
assem.dataMem["div_d_pwrs_10"] = "#0"
assem.dataMem["div_d_pwrs_11"] = "#0"
assem.dataMem["div_d_pwrs_12"] = "#0"
assem.dataMem["div_d_pwrs_13"] = "#0"
assem.dataMem["div_d_pwrs_14"] = "#0"
assem.dataMem["div_d_pwrs_15"] = "#0"
assem.dataMem["div_d_pwrs_16"] = "#0"
assem.dataMem["div_d_pwrs_17"] = "#0"
assem.dataMem["div_d_pwrs_18"] = "#0"
assem.dataMem["div_d_pwrs_19"] = "#0"
assem.dataMem["div_d_pwrs_20"] = "#0"
assem.dataMem["div_d_pwrs_21"] = "#0"
assem.dataMem["div_d_pwrs_22"] = "#0"
assem.dataMem["div_d_pwrs_23"] = "#0"
assem.dataMem["div_d_pwrs_24"] = "#0"
assem.dataMem["div_d_pwrs_25"] = "#0"
assem.dataMem["div_d_pwrs_26"] = "#0"
assem.dataMem["div_d_pwrs_27"] = "#0"
assem.dataMem["div_d_pwrs_28"] = "#0"
assem.dataMem["div_d_pwrs_29"] = "#0"
assem.dataMem["div_d_pwrs_30"] = "#0"
assem.dataMem["div_d_pwrs_"] = "&div_d_pwrs_0"
assem.dataMem["powersOf2_1"] = "#1"
assem.dataMem["powersOf2_2"] = "#2"
assem.dataMem["powersOf2_4"] = "#4"
assem.dataMem["powersOf2_8"] = "#8"
assem.dataMem["powersOf2_16"] = "#16"
assem.dataMem["powersOf2_32"] = "#32"
assem.dataMem["powersOf2_64"] = "#64"
assem.dataMem["powersOf2_128"] = "#128"
assem.dataMem["powersOf2_256"] = "#256"
assem.dataMem["powersOf2_512"] = "#512"
assem.dataMem["powersOf2_1024"] = "#1024"
assem.dataMem["powersOf2_2048"] = "#2048"
assem.dataMem["powersOf2_4096"] = "#4096"
assem.dataMem["powersOf2_8192"] = "#8192"
assem.dataMem["powersOf2_16384"] = "#16384"
assem.dataMem["powersOf2_32768"] = "#32768"
assem.dataMem["powersOf2_65536"] = "#65536"
assem.dataMem["powersOf2_131072"] = "#131072"
assem.dataMem["powersOf2_262144"] = "#262144"
assem.dataMem["powersOf2_524288"] = "#524288"
assem.dataMem["powersOf2_1048576"] = "#1048576"
assem.dataMem["powersOf2_2097152"] = "#2097152"
assem.dataMem["powersOf2_4194304"] = "#4194304"
assem.dataMem["powersOf2_8388608"] = "#8388608"
assem.dataMem["powersOf2_16777216"] = "#16777216"
assem.dataMem["powersOf2_33554432"] = "#33554432"
assem.dataMem["powersOf2_67108864"] = "#67108864"
assem.dataMem["powersOf2_134217728"] = "#134217728"
assem.dataMem["powersOf2_268435456"] = "#268435456"
assem.dataMem["powersOf2_536870912"] = "#536870912"
assem.dataMem["powersOf2_1073741824"] = "#1073741824"
assem.dataMem["powersOf2_"] = "&powersOf2_1"
assem.dataMem["d_i"] = "&div_d_pwrs_30";
assem.dataMem["two_i"] = "&powersOf2_1073741824";
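# Pure-Python sketch of the remainder a mod pass is meant to produce via the
# same binary long division (assumption: C-style truncated semantics, i.e.
# the remainder takes the sign of the dividend). Illustrative only.
def mod_reference(a, b):
    neg = a < 0
    n, d = abs(a), abs(b)
    for i in range(30, -1, -1):
        if n >= (d << i):               # n is the running remainder
            n -= (d << i)
    return -n if neg else n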
def parseArgs(argStr):
arg1 = re.findall("(?<=\s)[^\s,]+(?=,)",argStr)[0];
arg2 = re.findall("(?<=,\s)\s*\S+",argStr)[0];
return [arg1.strip(),arg2.strip()]
| gpl-2.0 | 1,400,089,787,447,496,400 | 34.20386 | 136 | 0.601186 | false |
markmuetz/stormtracks | stormtracks/results.py | 1 | 3180 | import os
from glob import glob
import pandas as pd
from load_settings import settings
from utils.utils import compress_file, decompress_file
RESULTS_TPL = '{0}.hdf'
class ResultNotFound(Exception):
'''Simple exception thrown if result cannot be found in results manager or on disk'''
pass
class StormtracksResultsManager(object):
'''Manager class that is responsible for loading and saving all python results
Simple key/value store.
Load/saves to settings.OUTPUT_DIR.
'''
def __init__(self, name, output_dir=None):
self.name = name
if output_dir:
self.output_dir = output_dir
else:
self.output_dir = settings.OUTPUT_DIR
def save_result(self, year, result_key, result):
'''Saves a given result based on year, user chosen result_key'''
dirname = os.path.join(self.output_dir, self.name)
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = RESULTS_TPL.format(year)
print('saving {0}'.format(filename))
path = os.path.join(dirname, filename)
result.to_hdf(path, result_key)
def get_result(self, year, result_key):
'''Returns a result from an HDF file.'''
dirname = os.path.join(self.output_dir, self.name)
filename = RESULTS_TPL.format(year)
path = os.path.join(dirname, filename)
try:
result = pd.read_hdf(path, result_key)
        except Exception:
raise ResultNotFound
return result
def delete(self, year, result_key):
'''Deletes a specific result from disk'''
raise NotImplementedError('Not sure how to delete one result')
def compress_year(self, year, delete=False):
'''Compresses a given year's dir and then optionally deletes that year'''
year_filename = os.path.join(self.output_dir, self.name, RESULTS_TPL.format(year))
compressed_filename = compress_file(year_filename)
if delete:
self.delete_year(year)
return compressed_filename
def delete_year(self, year):
'''Deletes a year (use with caution!)'''
year_filename = os.path.join(self.output_dir, self.name, RESULTS_TPL.format(year))
os.remove(year_filename)
def decompress_year(self, year):
'''Decompresses a given year's tarball'''
filename = os.path.join(self.output_dir, self.name, '{0}.bz2'.format(RESULTS_TPL.format(year)))
decompress_file(filename)
def list_years(self):
'''List all saved years'''
years = []
dirname = os.path.join(self.output_dir, self.name)
for year_dirname in glob(os.path.join(dirname, '*')):
try:
year = int(os.path.splitext(os.path.basename(year_dirname))[0])
years.append(year)
            except ValueError:
pass
return sorted(years)
def list_results(self, year):
'''List all results saved for a particular year'''
dirname = os.path.join(self.output_dir, self.name)
print(os.path.join(dirname, RESULTS_TPL.format(year)))
store = pd.HDFStore(os.path.join(dirname, RESULTS_TPL.format(year)))
results = [field[0][1:] for field in store.items()]
store.close()
return sorted(results)
| mit | -2,510,691,410,342,341,000 | 31.783505 | 103 | 0.650629 | false |
guolivar/totus-niwa | service/thirdparty/featureserver/FeatureServer/DataSource/Flickr.py | 1 | 4910 | from FeatureServer.DataSource import DataSource
from vectorformats.Feature import Feature
from FeatureServer.Exceptions.NoGeometryException import NoGeometryException
import md5
import urllib
from lxml import etree
from StringIO import StringIO
class Flickr (DataSource):
def __init__(self, name, api_key, api_secret, attributes = "*", srid_out = 4326, **args):
DataSource.__init__(self, name, **args)
self.api_key = api_key
self.api_secret = api_secret
self.srid_out = srid_out
self.attributes = attributes
self.api = FlickrAPI(self.api_key, self.api_secret)
def select (self, action):
features = []
if action.id is not None:
data = self.api.request({'method':'flickr.photos.getInfo','photo_id':action.id})
doc = etree.parse(StringIO(data)).getroot()
photo = doc.xpath('/rsp/photo')[0]
try:
features.append(self.convert_photo(photo))
            except Exception:
                pass
else:
params = {'method' : 'flickr.photos.search','extras':'description,owner_name,geo,tags,license'}
if action.bbox:
params['bbox'] = "%f,%f,%f,%f" % tuple(action.bbox)
if hasattr(self, 'user_id'):
params['user_id'] = self.user_id
if hasattr(self, 'tags'):
params['tags'] = self.tags
if hasattr(self, 'tag_mode'):
params['tag_mode'] = self.tag_mode
else:
params['tag_mode'] = "any"
data = self.api.request(params)
doc = etree.parse(StringIO(data)).getroot()
photos = [ photo for photo in doc.xpath('/rsp/photos')[0] ]
for photo in photos:
try:
features.append(self.convert_photo(photo))
                except Exception:
continue
return features
def convert_photo (self, xml):
node_names = self.get_node_names(xml)
props = {'img_url' : self.get_url(xml)}
owners = xml.xpath('./owner')
if len(owners) > 0:
props['owner'] = owners[0].attrib['nsid']
props['username'] = owners[0].attrib['username']
for i in node_names:
if i == "tags":
tags = [ tag.text for tag in xml.xpath('./%s' % str(i))[0] ]
props[i] = ",".join(tags)
else:
nodes = xml.xpath('./%s' % str(i))
if len(nodes) > 0:
if len(list(nodes[0])) == 0:
if nodes[0].text is None:
props[i] = ""
else:
props[i] = nodes[0].text
        # get_coordinates raises NoGeometryException when the photo carries
        # no location information; let that propagate to the caller.
        coordinates = self.get_coordinates(xml)
return Feature(id=xml.attrib["id"], geometry={'type':"Point", 'coordinates':coordinates}, geometry_attr="geometry", srs=self.srid_out, props=props)
def get_node_names(self, xml):
if self.attributes == "*":
props = [ child.tag for child in xml ]
props.remove("location")
props.remove("owner")
else:
props = self.attributes.split(',')
return props
def get_coordinates(self, xml):
location = xml.xpath('./location')
if len(location) > 0:
loc = location[0]
return [float(loc.attrib['longitude']), float(loc.attrib['latitude'])]
if "longitude" in xml.attrib and "latitude" in xml.attrib:
return [float(xml.attrib['longitude']), float(xml.attrib['latitude'])]
raise NoGeometryException("Twitter", self.name)
def get_url(self, xml):
return "http://farm%s.static.flickr.com/%s/%s_%s_b.jpg" % (xml.attrib['farm'], xml.attrib['server'], xml.attrib['id'], xml.attrib['secret'])
class FlickrAPI:
urls = {
'xml' : 'http://api.flickr.com/services/rest/'
}
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
def request(self, params = {}, format = "rest"):
params['api_key'] = self.api_key
params['format'] = format
params['api_sig'] = self.signature(params)
return urllib.urlopen(self.urls["xml"], urllib.urlencode(params)).read()
def signature(self, params):
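        """Build the api_sig value: the MD5 hex digest of the shared secret
        followed by every key/value pair concatenated in ascending key
        order (e.g. secret + "api_keyKEY" + "formatrest" + ...)."""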
items = []
keys = params.keys()
keys.sort()
for key in keys:
items.append("%s%s" % (key,params[key]))
sign_string = "%s%s" % (self.api_secret, "".join(items))
return md5.md5(sign_string).hexdigest()
| gpl-3.0 | -8,789,628,092,533,478,000 | 31.959732 | 155 | 0.509776 | false |
rmsk2/Das-grosse-Quiz | client/playingfield.py | 1 | 19375 | ################################################################################
# Copyright 2016 Martin Grap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
## @package playingfield Contains a class that implements the playing field of "Das grosse Quiz"
#
# \file playingfield.py
# \brief Contains a class that implements the playing field of "Das grosse Quiz".
import pickle
import questions
import displayclient
ERR_OK = 0
ERR_ERROR = 42
## \brief An exception class that is used for constructing exception objects in this module.
#
class PlayingFieldException(Exception):
    ## \brief Constructor.
    #
    # \param [error_message] Is a string. It has to contain an error message that is to be conveyed to
    #        the receiver of the corresponding exception.
#
def __init__(self, error_message):
Exception.__init__(self, 'PlayingField error:' + error_message)
## \brief This class implements the playing field of "Das grosse Quiz".
#
# The central data structure is a multi level dictionary which is referenced through the self._field member.
# The outmost dictionary has the category names as its keys. The values of each of these categories is another
# dictionary that has the keys 20, 40, 60, 80, 100. The value of these keys is a third dictionary that has the
# keys 'answeredby' and 'wronganswersby'. The value for the 'answeredby' key is either None (when the question has
# not been answered yet) or a string which specifies the name of the team that answered the question. The key
# 'wronganswersby' has a set() as its value which contains the names of the team(s) that have given a wrong answer
# to the question.
#
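# For illustration, a single answered cell could look like this (made-up
# team and category names):
#
# \code
# playing_field['some category'][40] = {'answeredby': 'Team 1', 'wronganswersby': {'Team 2'}}
# \endcode
#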
class PlayingField:
## \brief Constructor.
#
# \param [question_repo] An object of type questions.QuestionRepository which holds information about questions, teams
# and network configuration.
#
def __init__(self, question_repo):
## \brief An object of type questions.QuestionRepository.
self._repo = question_repo
## \brief A list of strings. Each list element denotes a category.
self._categories = self._repo.categories
## \brief A multi level dictionary that holds the playing field information.
self._field = {}
## \brief A multi level dictionary that holds the question for each cell in the playing field.
self._questions = {}
## \brief A list of strings. Each list element denotes a name of a team.
self._teams = self._repo.teams
## \brief An object of type displayclient.SignClient which is used to talk to the displayserver.
self._sign_client = displayclient.SignClient(self._repo.config['host'], self._repo.config['port'])
## \brief An object of type questions.Question. It holds the question which is currently displayed by the displayserver.
self._current_question = None
field_column = {20:None, 40:None, 60:None, 80:None, 100:None}
# Initialize the self._field and self._questions dictionaries
for i in self._categories:
self._field[i] = field_column.copy()
self._questions[i] = field_column.copy()
        # Retrieve all questions from the repository
for i in self._categories:
for j in [20, 40, 60, 80, 100]:
self._questions[i][j] = self._repo.get_question(i, j)
self._field[i][j] = {'answeredby':None, 'wronganswersby':set()}
## \brief Returns a reference to the playing field dictionary.
#
# \returns A dictionary as described in the class documentation.
#
@property
def playing_field(self):
return self._field
    ## \brief Returns a reference to the displayserver client object which is in use in this PlayingField instance.
#
# \returns An object of type displayclient.SignClient.
#
@property
def raspi(self):
return self._sign_client
## \brief Returns a string describing the hostname and port which have been specified in the question repository that is used
# by this PlayingField instance.
#
# \returns A string.
#
@property
def server_info(self):
return '{}:{}'.format(self._repo.config['host'], self._repo.config['port'])
    ## \brief Returns a reference to the questions.Question object which represents the question currently displayed by the displayserver.
#
# \returns An object of type questions.Question or None.
#
@property
def current_question(self):
return self._current_question
    ## \brief This method deserializes the current state of the playing field from a file.
    #
    # \param [file_name] A string. Has to contain the name of the file which contains a serialized state.
    #
    # \returns An int. ERR_OK means that reconstructing the state was successful, ERR_ERROR that it failed.
#
def load_state(self, file_name):
result = ERR_OK
dumped_playing_field = None
try:
with open(file_name, 'rb') as f:
dumped_playing_field = f.read()
restored_playing_field = pickle.loads(dumped_playing_field)
for i in self._categories:
for j in [20, 40, 60, 80, 100]:
for t in restored_playing_field[i][j]['wronganswersby']:
                        if t not in self.current_teams:
raise PlayingFieldException('Loaded state contains unknown team names')
# NB: If restored_playing_field[i][j]['answeredby'] contains an unknown team name the question is regarded as
# answered by noone.
self._field = restored_playing_field
self._current_question = None
except:
result = ERR_ERROR
return result
    ## \brief This method serializes the current state of the playing field into a file.
    #
    # \param [file_name] A string. Has to contain the name of the file into which the serialized state should be stored.
    #
    # \returns An int. ERR_OK means that saving the state was successful, ERR_ERROR that it failed.
#
def save_state(self, file_name):
result = ERR_OK
try:
dumped_playing_field = pickle.dumps(self._field)
with open(file_name, 'wb') as f:
f.write(dumped_playing_field)
except:
result = ERR_ERROR
return result
    ## \brief This method clears the state of the playing field, i.e. sets all cells to the default value which
# means that no question has been answered either right or wrong yet.
#
# \returns Nothing.
#
def clear(self):
for i in self._categories:
for j in [20, 40, 60, 80, 100]:
self._field[i][j] = {'answeredby':None, 'wronganswersby':set()}
## \brief This method evaluates the current state of the playing field. It iterates over all cells and sums up the
# points earned by each team. A correct answer adds the value of the question to the team result. In case of
# a wrong answer the question value is substracted from the team result.
#
# \returns A dictionary. It maps a string key to an int value. Each key is the name of a team and its value
# is the number of points earned by the corresponding team.
#
def calc_result(self):
result = {}
for i in self._teams:
result[i] = 0
for i in self._categories:
for j in [20, 40, 60, 80, 100]:
if self._field[i][j]['answeredby'] in self._teams:
result[self._field[i][j]['answeredby']] += j
for k in self._field[i][j]['wronganswersby']:
if k in self._teams:
result[k] -= j
return result
## \brief This method evaluates the current state of the playing field. It iterates over all cells and counts the
# questions which have already been answered.
#
# \returns An int.
#
def num_questions_answered(self):
result = 0
for i in self._categories:
for j in [20, 40, 60, 80, 100]:
if self._field[i][j]['answeredby'] != None:
result += 1
return result
## \brief Instructs the displayserver to display the intro message and resets the value of self._current_question to None.
#
    # \returns An int. A return value of 0 indicates a successful execution.
#
def show_intro(self):
self._current_question = None
return self._sign_client.show_intro()
## \brief Instructs the displayserver to display the "Thank you" message and resets the value of self._current_question to None.
#
    # \returns An int. A return value of 0 indicates a successful execution.
#
def show_thanks(self):
self._current_question = None
return self._sign_client.show_thanks()
## \brief Instructs the displayserver to display final result message and resets the value of self._current_question to None.
#
    # \returns An int. A return value of 0 indicates a successful execution.
#
def show_result(self):
self._current_question = None
res = self.calc_result()
return self._sign_client.show_result(res)
## \brief Instructs the displayserver to display the playing field and resets the value of self._current_question to None.
#
    # \returns An int. A return value of 0 indicates a successful execution.
#
def show(self):
self._current_question = None
return self._sign_client.show_playing_field(self._field)
## \brief Records that a team has answered a question correctly. If the question has already been answered this method
# does nothing.
#
# \param [category] A string. Denotes the category of the question that has been answered correctly.
#
# \param [value] An int. Denotes the value of the question that has been answered correctly.
#
# \param [who_answered] A string. Specifies the name of the team which has answered the question correctly.
#
# \returns Nothing.
#
def answer_question(self, category, value, who_answered):
if (self._field[category][value]['answeredby'] == None) and (who_answered not in self._field[category][value]['wronganswersby']):
self._field[category][value]['answeredby'] = who_answered
self._current_question = None
## \brief Resets the state of a question to its default value (no correct and no wrong answers).
#
# \param [category] A string. Denotes the category of the question that has been answered correctly.
#
# \param [value] An int. Denotes the value of the question that has been answered correctly.
#
# \returns Nothing.
#
def clear_question(self, category, value):
self._field[category][value]['answeredby'] = None
self._field[category][value]['wronganswersby'] = set()
self._current_question = None
## \brief Records that a team has given a wrong answer to a question. If the question has already been answered this method
# does nothing.
#
# \param [category] A string. Denotes the category of the question that has been answered wrongly.
#
# \param [value] An int. Denotes the value of the question that has been answered wrongly.
#
# \param [who_answered] A string. Specifies the name of the team which has answered the question wrongly.
#
# \returns Nothing.
#
def wrong_answer_question(self, category, value, who_answered):
if self._field[category][value]['answeredby'] == None:
self._field[category][value]['wronganswersby'].add(who_answered)
## \brief Records that a team has answered the current question correctly. If the question has already been answered this method
    # does nothing. Additionally this method instructs the displayserver to show the playing field again. The current question
# is also reset to None.
#
# \param [who_answered] A string. Specifies the name of the team which has answered the question correctly.
#
    # \returns An int. A value of 0 indicates that displaying the playing field was successful.
#
def answer_current_question(self, who_answered):
result = ERR_OK
if self._current_question != None:
c = self._current_question.category
v = self._current_question.value
if (self._field[c][v]['answeredby'] == None) and (who_answered not in self._field[c][v]['wronganswersby']):
self.answer_question(c, v, who_answered)
result = self.show()
return result
    ## \brief Resets the state of the current question to its default value (no correct and no wrong answers). Additionally this method instructs
# the displayserver to show the playing field again. The current question is also reset to None.
#
    # \returns An int. A value of 0 indicates that displaying the playing field was successful.
#
def clear_current_question(self):
result = ERR_OK
if self._current_question != None:
self.clear_question(self._current_question.category, self._current_question.value)
result = self.show()
return result
## \brief Records that a team has answered the current question wrongly. If the question has already been answered this method
# does nothing.
#
# \param [who_answered] A string. Specifies the name of the team which has given a wrong answer.
#
# \returns Nothing.
#
def wrong_answer_current_question(self, who_answered):
if self._current_question != None:
self.wrong_answer_question(self._current_question.category, self._current_question.value, who_answered)
## \brief Returns the category names in use in this PlayingField instance.
#
# \returns A list of strings. The strings denote the category names and the list is sorted.
#
@property
def current_categories(self):
result = self._categories[:]
result.sort()
return result
    ## \brief Returns the names of the teams in use in this PlayingField instance.
#
# \returns A list of strings. The strings denote the team names and the list is sorted.
#
@property
def current_teams(self):
result = self._teams[:]
result.sort()
return result
## \brief Returns the name of the team that has answered the specified question correctly.
#
# \param [category] A string. Denotes the category of the question for which the answer information is to be retrieved.
#
# \param [value] An int. Denotes the value of the question for which the answer information is to be retrieved.
#
# \returns A string. The name of the team which has given a correct answer or None in case the question
# has not been answered yet.
#
def question_answered_by(self, category, value):
return self._field[category][value]['answeredby']
## \brief Returns the names of the teams that have given a wrong answer to the specified question.
#
# \param [category] A string. Denotes the category of the question for which the answer information is to be retrieved.
#
# \param [value] An int. Denotes the value of the question for which the answer information is to be retrieved.
#
# \returns A set of strings. The set contains the names of the teams which have given a wrong answer.
#
def question_answered_wrong_by(self, category, value):
return self._field[category][value]['wronganswersby']
## \brief This method instructs the display server to show a certain question. This question then becomes the current question
# and the time value which specifies how many seconds remain to answer the question is set to its start value.
#
# \param [category] A string. Denotes the category of the question which is to become the current question.
#
# \param [value] An int. Denotes the value of the question which is to become the current question.
#
    # \returns An int. A value of 0 indicates that displaying the question was successful.
#
def ask_question(self, category, value):
question = self._questions[category][value]
time = question.time_allowance
if not question.show_time:
time = -1
self._current_question = question
self._current_question.reset()
return self._sign_client.show_question(question.text, time)
## \brief This method decrements the number of seconds that remain to answer the current question and updates the display to
# reflect the changed timer value.
#
    # \returns An int. A value of 0 indicates that displaying the question was successful.
#
def decrement_question_time(self):
result = ERR_OK
# Check if there is a valid current question, that its timer value is positive and that a time value should be displayed
if (self._current_question != None) and (self._current_question.current_time > 0) and (self._current_question.show_time):
self._current_question.current_time -= 1
result = self._sign_client.show_question(self._current_question.text, self._current_question.current_time)
return result
| apache-2.0 | 3,814,428,693,370,683,400 | 45.130952 | 145 | 0.612284 | false |
tcstewar/embodied_benchmarks | control.py | 1 | 3938 | import numpy as np
class Signal(object):
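    """Band-limited random reference signal built as a sum of sinusoids.

    The component angular frequencies are w_k = 2*pi*k/L for
    k = 0 .. max_freq*L - 1; the random complex amplitudes are normalised
    to unit total power. value(t) evaluates the signal, dvalue(t) its
    exact time derivative.
    """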
def __init__(self, D, L, dt, max_freq, seed=None):
rng = np.random.RandomState(seed=seed)
steps = int(max_freq * L)
self.w = 2 * np.pi * np.arange(steps) / L
self.A = rng.randn(D, steps) + 1.0j * rng.randn(D, steps)
power = np.sqrt(np.sum(self.A * self.A.conj()))
self.A /= power
def value(self, t):
s = np.sin(self.w * t) * self.A
return np.sum(s, axis=1).real
def dvalue(self, t):
s = np.cos(self.w * t) * self.w * self.A
return np.sum(s, axis=1).real
class Environment(object):
def __init__(self, seed=None):
self.rng = np.random.RandomState(seed=seed)
class LinearSystem(Environment):
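    """Noisy linear plant integrated with explicit Euler steps:

        state += (dot(motor, J) + additive) * dt

    Gaussian noise is added to the motor command before it acts on the
    plant, and step() returns a noisy observation of the state rather
    than the true state.
    """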
def __init__(self, d_controlled, d_motor, dt=0.001, seed=None,
scale_mult=10, scale_add=10, diagonal=False,
max_sense_noise=0.1, max_motor_noise=0.1,
period=5.0, max_freq=1.0):
super(LinearSystem, self).__init__(seed=seed)
self.d_motor = d_motor
self.d_controlled = d_controlled
self.dt = dt
self.state = self.rng.randn(d_controlled)
if diagonal:
assert d_controlled == d_motor
self.J = np.abs(np.diag(self.rng.randn(d_motor))) * scale_mult
else:
self.J = self.rng.randn(d_motor, d_controlled) * scale_mult
self.sense_noise = self.rng.uniform(0, max_sense_noise)
self.motor_noise = self.rng.uniform(0, max_motor_noise)
self.additive = self.rng.rand(d_controlled) * scale_add
def step(self, motor):
motor = motor + self.rng.randn(self.d_motor) * self.motor_noise
dstate = (np.dot(motor, self.J) + self.additive) * self.dt
self.state = self.state + dstate
return self.state + self.rng.randn(self.d_controlled) * self.sense_noise
class Controller(object):
pass
class PID(Controller):
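    """PID controller with a low-pass filtered derivative term:

        u = Kp*(desired - state) - Kd*dstate + Ki*integral(desired - state)

    where dstate is an exponentially smoothed per-step state difference
    (smoothing time constant tau_d). If a plant matrix J is given, u is
    mapped through a determinant-normalised J.T into motor space.
    """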
def __init__(self, Kp, Kd=0, Ki=0, J=None, tau_d=0.1, dt=0.001):
self.Kp = Kp
self.Kd = Kd
self.Ki = Ki
if J is not None:
x = np.dot(J.T, J)
scale = np.linalg.det(x) ** (1.0 / x.shape[0])
self.JT = J.T / scale
else:
self.JT = None
self.prev_state = None
self.dstate = None
self.istate = None
self.scale = np.exp(-dt / tau_d)
self.dt = dt
def step(self, state, desired_state):
        if self.prev_state is None:
            self.dstate = np.zeros_like(state)
            self.istate = np.zeros_like(state)
        else:
            d = state - self.prev_state
            self.dstate = self.dstate * self.scale + d * (1.0 - self.scale)
            self.istate += self.dt * (desired_state - state)
        # Remember the current state so the next call can difference
        # against it when estimating the derivative.
        self.prev_state = state
v = (self.Kp * (desired_state - state) +
self.Kd * (-self.dstate) +
self.Ki * self.istate)
if self.JT is not None:
v = np.dot(v, self.JT)
return v
if __name__ == '__main__':
D_state = 3
D_motor = 5
dt = 0.001
env = LinearSystem(d_controlled=D_state, d_motor=D_motor, diagonal=False, scale_add=5)
ctrl = PID(100, 10, 1000, J=env.J)
desired_state = Signal(D_state, L=3.0, dt=dt, max_freq=2.0)
T = 6.0
steps = int(T / dt)
t = np.arange(steps) * dt
state = np.zeros((D_state, steps), dtype=float)
desired = np.zeros((D_state, steps), dtype=float)
sense = np.zeros((D_state, steps), dtype=float)
m = np.zeros(D_motor, dtype=float)
for i in range(steps):
desired[:,i] = desired_state.value(t[i])
s = env.step(m)
m = ctrl.step(s, desired[:,i])
state[:,i] = env.state
sense[:,i] = s
import pylab
pylab.plot(t, state.T, label='state')
pylab.plot(t, desired.T, label='desired')
#pylab.plot(sense.T, label='sense')
#pylab.legend(loc='best')
pylab.show()
| gpl-2.0 | 7,529,953,417,347,471,000 | 28.38806 | 90 | 0.550279 | false |
mariodebian/jclic-browser | python-examples/demo.py | 1 | 3681 | # This is an example for demonstrating use of the GtkTreeView widget.
# The code in this example is not particularly good: it is written to
# concentrate on widget usage demonstration, not for maintainability.
import pygtk
pygtk.require("2.0")
import gtk
import gobject
view = None
choose_parent_view = None
dialog = None
def move(old_iter, new_parent, model):
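    # pygtk's TreeStore offers no direct cross-parent move, so "moving" a
    # row means copying its data under the new parent and deleting the
    # old row.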
if old_iter:
folder = model.get_value(old_iter, 0)
model.remove(old_iter)
new_iter = model.insert_before(new_parent, None)
model.set_value(new_iter, 0, folder)
model.set_value(new_iter, 1, folder["name"])
def dialog_ok(*args):
dialog.hide()
model, parent_iter = choose_parent_view.get_selection().get_selected()
model, old_iter = view.get_selection().get_selected()
if parent_iter and old_iter:
move(old_iter, parent_iter, model)
def dialog_cancel(*args):
dialog.hide()
def choose_parent(*args):
dialog.show()
def move_to_top(*args):
model, old_iter = view.get_selection().get_selected()
if old_iter:
move(old_iter, None, model)
def quit(*args):
gtk.main_quit()
def make_view(model):
# Create the view itself.
view = gtk.TreeView(model)
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Folder", renderer, text=1)
view.append_column(column)
view.show()
# Create scrollbars around the view.
scrolled = gtk.ScrolledWindow()
scrolled.add(view)
scrolled.show()
return view, scrolled
def make_buttons(list):
buttonbox = gtk.HBox()
for label, func in list:
button = gtk.Button()
button.set_label(label)
button.connect("clicked", func)
button.show()
buttonbox.pack_start(button, expand=gtk.FALSE, fill=gtk.FALSE)
buttonbox.show()
return buttonbox
def main():
# Create the model.
model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING)
# Populate the model with data. We represent folders with Python
# dicts (hash tables or hashmaps in other languages), for simplicity.
# In a real program, they would be programmer defined classes.
for i in range(100):
folder = { "name": "folder %d" % i, "files": ["foo", "bar"] }
iter = model.insert_before(None, None)
model.set_value(iter, 0, folder)
model.set_value(iter, 1, folder["name"])
# Create the main view.
global view
view, scrolled = make_view(model)
view.set_reorderable(gtk.TRUE)
# Create some command buttons.
buttonbox = make_buttons([("Quit", quit), ("Choose parent", choose_parent),
("Move to top", move_to_top)])
# Create a vertical box to hold the above stuff.
vbox = gtk.VBox()
vbox.pack_start(buttonbox, expand=gtk.FALSE, fill=gtk.FALSE)
vbox.pack_start(scrolled, expand=gtk.TRUE, fill=gtk.TRUE)
vbox.show()
# Create toplevel window to show it all.
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.connect("delete_event", quit)
win.add(vbox)
win.show()
win.resize(300, 500)
# Create the GtkTreeView for choosing a parent.
global choose_parent_view
choose_parent_view, scrolled = make_view(model)
buttonbox = make_buttons([("OK", dialog_ok), ("Cancel", dialog_cancel)])
vbox = gtk.VBox()
vbox.pack_start(scrolled, expand=gtk.TRUE, fill=gtk.TRUE)
vbox.pack_start(buttonbox, expand=gtk.FALSE, fill=gtk.FALSE)
vbox.show()
global dialog
dialog = gtk.Window(gtk.WINDOW_TOPLEVEL)
dialog.set_default_size(200, 400)
dialog.add(vbox)
# Run the Gtk+ main loop.
gtk.main()
if __name__ == "__main__":
main()
| gpl-2.0 | 3,050,538,211,119,226,000 | 28.685484 | 79 | 0.646292 | false |
blckshrk/Weboob | contrib/windows-install/ez_setup.py | 1 | 11838 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "1.1.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
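# subprocess.check_call was only added in Python 2.5; install a minimal
# fallback so this script also works on Python 2.4.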
def _check_call_py24(cmd, *args, **kwargs):
res = subprocess.call(cmd, *args, **kwargs)
class CalledProcessError(Exception):
pass
if not res == 0:
msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
raise CalledProcessError(msg)
vars(subprocess).setdefault('check_call', _check_call_py24)
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of setuptools (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U setuptools'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
cmd = [
'powershell',
'-Command',
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
]
subprocess.check_call(cmd)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
subprocess.check_call(cmd)
def has_curl():
cmd = ['curl', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
subprocess.check_call(cmd)
def has_wget():
cmd = ['wget', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
    return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
src = dst = None
try:
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(target, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = [
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
]
for dl in downloaders:
if dl.viable():
return dl
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15,
downloader_factory=get_best_downloader):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
tgz_name = "setuptools-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base,
downloader_factory=options.downloader_factory)
return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | -6,922,584,917,193,558,000 | 30.994595 | 87 | 0.622149 | false |
KingxBanana/zulip | zproject/settings.py | 1 | 41223 | from __future__ import absolute_import
# Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports prod_settings.py, and any site-specific configuration
# belongs there. The template for prod_settings.py is prod_settings_template.py
#
# See http://zulip.readthedocs.io/en/latest/settings.html for more information
#
########################################################################
import os
import platform
import time
import sys
import six.moves.configparser
from zerver.lib.db import TimeTrackingConnection
import six
########################################################################
# INITIAL SETTINGS
########################################################################
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
config_file = six.moves.configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
# Whether this instance of Zulip is running in a production environment.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION
secrets_file = six.moves.configparser.RawConfigParser()
if PRODUCTION:
secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
secrets_file.read(os.path.join(DEPLOY_ROOT, "zproject/dev-secrets.conf"))
def get_secret(key):
if secrets_file.has_option('secrets', key):
return secrets_file.get('secrets', key)
return None
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
if 'DEBUG' not in globals():
# Uncomment end of next line to test JS/CSS minification.
DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'
if DEBUG:
INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker; this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
IS_WORKER = True
else:
IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# This is overridden in test_settings.py for the test suites
CASPER_TESTS = False
# Import variables like secrets from the prod_settings file
# Import prod_settings after determining the deployment/machine type
if PRODUCTION:
from .prod_settings import *
else:
from .dev_settings import *
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################
# For any settings that are not defined in prod_settings.py,
# we want to initialize them to sane default
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
'TWITTER_CONSUMER_SECRET': '',
'TWITTER_ACCESS_TOKEN_KEY': '',
'TWITTER_ACCESS_TOKEN_SECRET': '',
'EMAIL_GATEWAY_PATTERN': '',
'EMAIL_GATEWAY_EXAMPLE': '',
'EMAIL_GATEWAY_BOT': None,
'EMAIL_GATEWAY_LOGIN': None,
'EMAIL_GATEWAY_PASSWORD': None,
'EMAIL_GATEWAY_IMAP_SERVER': None,
'EMAIL_GATEWAY_IMAP_PORT': None,
'EMAIL_GATEWAY_IMAP_FOLDER': None,
'EMAIL_GATEWAY_EXTRA_PATTERN_HACK': None,
'S3_KEY': '',
'S3_SECRET_KEY': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
'MAX_FILE_UPLOAD_SIZE': 25,
'ERROR_REPORTING': True,
'STAGING_ERROR_NOTIFICATIONS': False,
'EVENT_LOGS_ENABLED': False,
'SAVE_FRONTEND_STACKTRACES': False,
'JWT_AUTH_KEYS': {},
'NAME_CHANGES_DISABLED': False,
'DEPLOYMENT_ROLE_NAME': "",
'RABBITMQ_HOST': 'localhost',
'RABBITMQ_USERNAME': 'zulip',
'MEMCACHED_LOCATION': '127.0.0.1:11211',
'RATE_LIMITING': True,
'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6379,
# The following bots only exist in non-VOYAGER installs
'ERROR_BOT': None,
'NEW_USER_BOT': None,
'NAGIOS_STAGING_SEND_BOT': None,
'NAGIOS_STAGING_RECEIVE_BOT': None,
'APNS_CERT_FILE': None,
'APNS_KEY_FILE': None,
'APNS_SANDBOX': True,
'ANDROID_GCM_API_KEY': None,
'INITIAL_PASSWORD_SALT': None,
'FEEDBACK_BOT': '[email protected]',
'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
'ADMINS': '',
'SHARE_THE_LOVE': False,
'INLINE_IMAGE_PREVIEW': True,
'INLINE_URL_EMBED_PREVIEW': False,
'CAMO_URI': '',
'ENABLE_FEEDBACK': PRODUCTION,
'SEND_MISSED_MESSAGE_EMAILS_AS_USER': False,
'SERVER_EMAIL': None,
'FEEDBACK_EMAIL': None,
'WELCOME_EMAIL_SENDER': None,
'EMAIL_DELIVERER_DISABLED': False,
'ENABLE_GRAVATAR': True,
'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
'AUTH_LDAP_SERVER_URI': "",
'EXTERNAL_URI_SCHEME': "https://",
'ZULIP_COM': False,
'SHOW_OSS_ANNOUNCEMENT': False,
'REGISTER_LINK_DISABLED': False,
'LOGIN_LINK_DISABLED': False,
'ABOUT_LINK_DISABLED': False,
'CUSTOM_LOGO_URL': None,
'VERBOSE_SUPPORT_OFFERS': False,
'STATSD_HOST': '',
'OPEN_REALM_CREATION': False,
'REALMS_HAVE_SUBDOMAINS': False,
'SUBDOMAINS_HOMEPAGE': False,
'ROOT_SUBDOMAIN_ALIASES': ["www"],
'REMOTE_POSTGRES_HOST': '',
'REMOTE_POSTGRES_SSLMODE': '',
# Default GOOGLE_CLIENT_ID to the value needed for Android auth to work
'GOOGLE_CLIENT_ID': '835904834568-77mtr5mtmpgspj9b051del9i9r5t4g4n.apps.googleusercontent.com',
'SOCIAL_AUTH_GITHUB_KEY': None,
'SOCIAL_AUTH_GITHUB_ORG_NAME': None,
'SOCIAL_AUTH_GITHUB_TEAM_ID': None,
'SOCIAL_AUTH_FIELDS_STORED_IN_SESSION': ['subdomain'],
'DBX_APNS_CERT_FILE': None,
'DBX_APNS_KEY_FILE': None,
'PERSONAL_ZMIRROR_SERVER': None,
'EXTRA_INSTALLED_APPS': [],
'DEFAULT_NEW_REALM_STREAMS': {
"social": {"description": "For socializing", "invite_only": False},
"general": {"description": "For general stuff", "invite_only": False},
"zulip": {"description": "For zulip stuff", "invite_only": False}
},
'REALM_CREATION_LINK_VALIDITY_DAYS': 7,
'TERMS_OF_SERVICE': None,
'TOS_VERSION': None,
'SYSTEM_ONLY_REALMS': {"zulip.com"},
'FIRST_TIME_TOS_TEMPLATE': None,
'USING_PGROONGA': False,
'POST_MIGRATION_CACHE_FLUSHING': False,
'ENABLE_FILE_LINKS': False,
'USE_WEBSOCKETS': True,
}
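# Any of the above not already defined (e.g. by prod_settings or dev_settings)
# is now filled in with its default by injecting it into this module's globals.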
for setting_name, setting_val in six.iteritems(DEFAULT_SETTINGS):
if setting_name not in vars():
vars()[setting_name] = setting_val
# Extend ALLOWED_HOSTS with localhost (needed to RPC to Tornado).
ALLOWED_HOSTS += ['127.0.0.1', 'localhost']
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "[email protected]"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
("NOREPLY_EMAIL_ADDRESS", "[email protected]"),
("DEFAULT_FROM_EMAIL", "Zulip <[email protected]>"),
("ALLOWED_HOSTS", ["*", '127.0.0.1', 'localhost']),
]
if ADMINS == "":
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
MANAGERS = ADMINS
# Voyager is a production Zulip server that is not zulip.com or
# staging.zulip.com. VOYAGER is the standalone all-on-one-server
# production deployment model based on the original Zulip
# ENTERPRISE implementation. We expect most users of the open source
# project will be using VOYAGER=True in production.
VOYAGER = PRODUCTION and not ZULIP_COM
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific site(s) and a
# single database can manage content for multiple sites.
#
# We set this site's domain to 'zulip.com' in populate_db.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
# this directory will be used to store logs for development environment
DEVELOPMENT_LOG_DIRECTORY = os.path.join(DEPLOY_ROOT, 'var', 'log')
# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True
# List of callables that know how to import templates from various sources.
LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
if PRODUCTION:
# Template caching is a significant performance win in production.
LOADERS = [('django.template.loaders.cached.Loader', LOADERS)]
TEMPLATES = [
{
'BACKEND': 'zproject.jinja2.backends.Jinja2',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'environment': 'zproject.jinja2.environment',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'pipeline.jinja2.PipelineExtension',
],
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
'django.template.context_processors.i18n',
],
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'django_templates'),
],
'APP_DIRS': False,
'OPTIONS': {
'debug': DEBUG,
'loaders': LOADERS,
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
],
},
},
]
MIDDLEWARE_CLASSES = (
# Our logging middleware should be the first middleware item.
'zerver.middleware.TagRequests',
'zerver.middleware.LogRequests',
'zerver.middleware.JsonErrorHandler',
'zerver.middleware.RateLimitMiddleware',
'zerver.middleware.FlushDisplayRecipientCache',
'django.middleware.common.CommonMiddleware',
'zerver.middleware.SessionHostDomainMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ANONYMOUS_USER_ID = None
AUTH_USER_MODEL = "zerver.UserProfile"
TEST_RUNNER = 'zerver.lib.test_runner.Runner'
ROOT_URLCONF = 'zproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'
# A site can include additional installed apps via the
# EXTRA_INSTALLED_APPS setting
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'confirmation',
'guardian',
'pipeline',
'zerver',
'social.apps.django_app.default',
]
if USING_PGROONGA:
INSTALLED_APPS += ['pgroonga']
INSTALLED_APPS += EXTRA_INSTALLED_APPS
ZILENCER_ENABLED = 'zilencer' in INSTALLED_APPS
# Base URL of the Tornado server
# We set it to None when running backend tests or populate_db.
# We override the port number when running frontend tests.
TORNADO_SERVER = 'http://127.0.0.1:9993'
RUNNING_INSIDE_TORNADO = False
########################################################################
# DATABASE CONFIGURATION
########################################################################
DATABASES = {"default": {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'zulip',
'USER': 'zulip',
'PASSWORD': '', # Authentication done via certificates
'HOST': '', # Host = '' => connect through a local socket
'SCHEMA': 'zulip',
'CONN_MAX_AGE': 600,
'OPTIONS': {
'connection_factory': TimeTrackingConnection
},
},
}
if DEVELOPMENT:
LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
DATABASES["default"].update({
'PASSWORD': LOCAL_DATABASE_PASSWORD,
'HOST': 'localhost'
})
elif REMOTE_POSTGRES_HOST != '':
DATABASES['default'].update({
'HOST': REMOTE_POSTGRES_HOST,
})
if get_secret("postgres_password") is not None:
DATABASES['default'].update({
'PASSWORD': get_secret("postgres_password"),
})
if REMOTE_POSTGRES_SSLMODE != '':
DATABASES['default']['OPTIONS']['sslmode'] = REMOTE_POSTGRES_SSLMODE
else:
DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
if USING_PGROONGA:
# We need to have "pgroonga" schema before "pg_catalog" schema in
# the PostgreSQL search path, because "pgroonga" schema overrides
# the "@@" operator from "pg_catalog" schema, and "pg_catalog"
# schema is searched first if not specified in the search path.
# See also: http://www.postgresql.org/docs/current/static/runtime-config-client.html
pg_options = '-c search_path=%(SCHEMA)s,zulip,public,pgroonga,pg_catalog' % \
DATABASES['default']
DATABASES['default']['OPTIONS']['options'] = pg_options
########################################################################
# RABBITMQ CONFIGURATION
########################################################################
USING_RABBITMQ = True
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")
########################################################################
# CACHING CONFIGURATION
########################################################################
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': MEMCACHED_LOCATION,
'TIMEOUT': 3600,
'OPTIONS': {
'verify_keys': True,
'tcp_nodelay': True,
'retry_timeout': 1,
}
},
'database': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'third_party_api_results',
# Basically never timeout. Setting to 0 isn't guaranteed
# to work, see https://code.djangoproject.com/ticket/9595
'TIMEOUT': 2000000000,
'OPTIONS': {
'MAX_ENTRIES': 100000000,
'CULL_FREQUENCY': 10,
}
},
}
########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################
RATE_LIMITING_RULES = [
(60, 100), # 100 requests max every minute
]
DEBUG_RATE_LIMITING = DEBUG
REDIS_PASSWORD = get_secret('redis_password')
########################################################################
# SECURITY SETTINGS
########################################################################
# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
try:
# For get_updates hostname sharding.
domain = config_file.get('django', 'cookie_domain')
SESSION_COOKIE_DOMAIN = '.' + domain
CSRF_COOKIE_DOMAIN = '.' + domain
except six.moves.configparser.Error:
# Failing here is OK
pass
# Prevent Javascript from reading the CSRF token from cookies. Our code gets
# the token from the DOM, which means malicious code could too. But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_PATH = '/;HttpOnly'
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'
if DEVELOPMENT:
# Use fast password hashing for creating testing users when not
# PRODUCTION. Saves a bunch of time.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher'
)
# Also we auto-generate passwords for the default users which you
# can query using ./manage.py print_initial_password
INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
########################################################################
# API/BOT SETTINGS
########################################################################
if "EXTERNAL_API_PATH" not in vars():
EXTERNAL_API_PATH = EXTERNAL_HOST + "/api"
EXTERNAL_API_URI = EXTERNAL_URI_SCHEME + EXTERNAL_API_PATH
SERVER_URI = EXTERNAL_URI_SCHEME + EXTERNAL_HOST
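# With EXTERNAL_HOST = "zulip.example.com" (illustrative), these become
# "https://zulip.example.com/api" and "https://zulip.example.com".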
if "NAGIOS_BOT_HOST" not in vars():
NAGIOS_BOT_HOST = EXTERNAL_HOST
S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")
# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")
GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')
DROPBOX_APP_KEY = get_secret("dropbox_app_key")
MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")
# This comes from our mandrill accounts page
MANDRILL_API_KEY = get_secret("mandrill_api_key")
# Twitter API credentials
# Secrecy not required because its only used for R/O requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")
# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [{'var_name': 'NOTIFICATION_BOT',
'email_template': 'notification-bot@%s',
'name': 'Notification Bot'},
{'var_name': 'EMAIL_GATEWAY_BOT',
'email_template': 'emailgateway@%s',
'name': 'Email Gateway'},
{'var_name': 'NAGIOS_SEND_BOT',
'email_template': 'nagios-send-bot@%s',
'name': 'Nagios Send Bot'},
{'var_name': 'NAGIOS_RECEIVE_BOT',
'email_template': 'nagios-receive-bot@%s',
'name': 'Nagios Receive Bot'},
{'var_name': 'WELCOME_BOT',
'email_template': 'welcome-bot@%s',
'name': 'Welcome Bot'}]
if PRODUCTION:
INTERNAL_BOTS += [
{'var_name': 'NAGIOS_STAGING_SEND_BOT',
'email_template': 'nagios-staging-send-bot@%s',
'name': 'Nagios Staging Send Bot'},
{'var_name': 'NAGIOS_STAGING_RECEIVE_BOT',
'email_template': 'nagios-staging-receive-bot@%s',
'name': 'Nagios Staging Receive Bot'},
]
INTERNAL_BOT_DOMAIN = "zulip.com"
# Set the realm-specific bot names
for bot in INTERNAL_BOTS:
if vars().get(bot['var_name']) is None:
bot_email = bot['email_template'] % (INTERNAL_BOT_DOMAIN,)
vars()[bot['var_name']] = bot_email
if EMAIL_GATEWAY_PATTERN != "":
EMAIL_GATEWAY_EXAMPLE = EMAIL_GATEWAY_PATTERN % ("support+abcdefg",)
DEPLOYMENT_ROLE_KEY = get_secret("deployment_role_key")
if PRODUCTION:
FEEDBACK_TARGET = "https://zulip.com/api"
else:
FEEDBACK_TARGET = "http://localhost:9991/api"
########################################################################
# STATSD CONFIGURATION
########################################################################
# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
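# For example (illustrative values, not Zulip defaults), earlier in your
# settings you might have:
#   STATSD_HOST = 'stats.example.com'
#   STATSD_PREFIX = 'zulip'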
if STATSD_HOST != '':
INSTALLED_APPS += ['django_statsd']
STATSD_PORT = 8125
STATSD_CLIENT = 'django_statsd.clients.normal'
########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################
if CAMO_URI != '':
# This needs to be synced with the Camo installation
CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################
STATIC_URL = '/static/'
# ZulipStorage is a modified version of PipelineCachedStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers. So we only use
# ZulipStorage when not DEBUG.
# This is the default behavior from Pipeline, but we set it
# here so that urls.py can read it.
PIPELINE_ENABLED = not DEBUG
if DEBUG:
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
if PIPELINE_ENABLED:
STATIC_ROOT = os.path.abspath('prod-static/serve')
else:
STATIC_ROOT = os.path.abspath('static/')
else:
STATICFILES_STORAGE = 'zerver.storage.ZulipStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'pipeline.finders.PipelineFinder',
)
if PRODUCTION:
STATIC_ROOT = '/home/zulip/prod-static'
else:
STATIC_ROOT = os.path.abspath('prod-static/serve')
LOCALE_PATHS = (os.path.join(STATIC_ROOT, 'locale'),)
# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
STATICFILES_DIRS = ['static/']
STATIC_HEADER_FILE = 'zerver/static_header.txt'
# To use minified files in dev, set PIPELINE_ENABLED = True. For the full
# cache-busting behavior, you must also set DEBUG = False.
#
# You will need to run update-prod-static after changing
# static files.
PIPELINE = {
'PIPELINE_ENABLED': PIPELINE_ENABLED,
'CSS_COMPRESSOR': 'pipeline.compressors.yui.YUICompressor',
'YUI_BINARY': '/usr/bin/env yui-compressor',
'STYLESHEETS': {
# If you add a style here, please update stylesheets()
# in frontend_tests/zjsunit/output.js as needed.
'activity': {
'source_filenames': ('styles/activity.css',),
'output_filename': 'min/activity.css'
},
'portico': {
'source_filenames': (
'third/zocial/zocial.css',
'styles/portico.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
),
'output_filename': 'min/portico.css'
},
# Two versions of the app CSS exist because of QTBUG-3467
'app-fontcompat': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'styles/components.css',
'styles/zulip.css',
'styles/settings.css',
'styles/subscriptions.css',
'styles/compose.css',
'styles/left-sidebar.css',
'styles/overlay.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/media.css',
            # We don't want fonts.css on QtWebKit, so it's omitted here
),
'output_filename': 'min/app-fontcompat.css'
},
'app': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'third/jquery-perfect-scrollbar/css/perfect-scrollbar.css',
'styles/components.css',
'styles/zulip.css',
'styles/settings.css',
'styles/subscriptions.css',
'styles/compose.css',
'styles/left-sidebar.css',
'styles/overlay.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
'styles/media.css',
),
'output_filename': 'min/app.css'
},
'common': {
'source_filenames': (
'third/bootstrap/css/bootstrap.css',
'third/bootstrap/css/bootstrap-btn.css',
'third/bootstrap/css/bootstrap-responsive.css',
),
'output_filename': 'min/common.css'
},
},
'JAVASCRIPT': {},
}
JS_SPECS = {
'common': {
'source_filenames': (
'node_modules/jquery/dist/jquery.js',
'third/underscore/underscore.js',
'js/blueslip.js',
'third/bootstrap/js/bootstrap.js',
'js/common.js',
),
'output_filename': 'min/common.js'
},
'signup': {
'source_filenames': (
'js/signup.js',
'node_modules/jquery-validation/dist/jquery.validate.js',
),
'output_filename': 'min/signup.js'
},
'api': {
'source_filenames': ('js/api.js',),
'output_filename': 'min/api.js'
},
'app_debug': {
'source_filenames': ('js/debug.js',),
'output_filename': 'min/app_debug.js'
},
'app': {
'source_filenames': [
'third/bootstrap-notify/js/bootstrap-notify.js',
'third/html5-formdata/formdata.js',
'node_modules/jquery-validation/dist/jquery.validate.js',
'node_modules/sockjs-client/sockjs.js',
'third/jquery-form/jquery.form.js',
'third/jquery-filedrop/jquery.filedrop.js',
'third/jquery-caret/jquery.caret.1.5.2.js',
'third/xdate/xdate.dev.js',
'third/spin/spin.js',
'third/jquery-mousewheel/jquery.mousewheel.js',
'third/jquery-throttle-debounce/jquery.ba-throttle-debounce.js',
'third/jquery-idle/jquery.idle.js',
'third/jquery-autosize/jquery.autosize.js',
'third/jquery-perfect-scrollbar/js/perfect-scrollbar.js',
'third/lazyload/lazyload.js',
'third/spectrum/spectrum.js',
'third/string-prototype-codepointat/codepointat.js',
'third/winchan/winchan.js',
'third/handlebars/handlebars.runtime.js',
'third/marked/lib/marked.js',
'templates/compiled.js',
'js/feature_flags.js',
'js/loading.js',
'js/util.js',
'js/dict.js',
'js/components.js',
'js/localstorage.js',
'js/channel.js',
'js/setup.js',
'js/unread_ui.js',
'js/muting.js',
'js/muting_ui.js',
'js/viewport.js',
'js/rows.js',
'js/people.js',
'js/unread.js',
'js/topic_list.js',
'js/pm_list.js',
'js/stream_list.js',
'js/filter.js',
'js/message_list_view.js',
'js/message_list.js',
'js/narrow.js',
'js/reload.js',
'js/compose_fade.js',
'js/fenced_code.js',
'js/echo.js',
'js/socket.js',
'js/compose.js',
'js/stream_color.js',
'js/admin.js',
'js/stream_data.js',
'js/subs.js',
'js/message_edit.js',
'js/condense.js',
'js/resize.js',
'js/floating_recipient_bar.js',
'js/ui.js',
'js/pointer.js',
'js/click_handlers.js',
'js/scroll_bar.js',
'js/gear_menu.js',
'js/copy_and_paste.js',
'js/popovers.js',
'js/typeahead_helper.js',
'js/search_suggestion.js',
'js/search.js',
'js/composebox_typeahead.js',
'js/navigate.js',
'js/hotkey.js',
'js/favicon.js',
'js/notifications.js',
'js/hashchange.js',
'js/invite.js',
'js/message_flags.js',
'js/alert_words.js',
'js/alert_words_ui.js',
'js/message_store.js',
'js/server_events.js',
'js/zulip.js',
'js/activity.js',
'js/colorspace.js',
'js/timerender.js',
'js/tutorial.js',
'js/templates.js',
'js/avatar.js',
'js/settings.js',
'js/tab_bar.js',
'js/emoji.js',
'js/referral.js',
'js/custom_markdown.js',
'js/bot_data.js',
# JS bundled by webpack is also included here if PIPELINE_ENABLED setting is true
],
'output_filename': 'min/app.js'
},
'activity': {
'source_filenames': (
'third/sorttable/sorttable.js',
),
'output_filename': 'min/activity.js'
},
# We also want to minify sockjs separately for the sockjs iframe transport
'sockjs': {
'source_filenames': ('node_modules/sockjs-client/sockjs.js',),
'output_filename': 'min/sockjs.min.js'
},
}
if PIPELINE_ENABLED:
    # This is also done in test_settings.py; see the comment there.
JS_SPECS['app']['source_filenames'].append('js/bundle.js')
app_srcs = JS_SPECS['app']['source_filenames']
########################################################################
# LOGGING SETTINGS
########################################################################
ZULIP_PATHS = [
("SERVER_LOG_PATH", "/var/log/zulip/server.log"),
("ERROR_FILE_LOG_PATH", "/var/log/zulip/errors.log"),
("MANAGEMENT_LOG_PATH", "/var/log/zulip/manage.log"),
("WORKER_LOG_PATH", "/var/log/zulip/workers.log"),
("PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.pickle"),
("JSON_PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.json"),
("EMAIL_MIRROR_LOG_PATH", "/var/log/zulip/email_mirror.log"),
("EMAIL_DELIVERER_LOG_PATH", "/var/log/zulip/email-deliverer.log"),
("LDAP_SYNC_LOG_PATH", "/var/log/zulip/sync_ldap_user_data.log"),
("QUEUE_ERROR_DIR", "/var/log/zulip/queue_error"),
("STATS_DIR", "/home/zulip/stats"),
("DIGEST_LOG_PATH", "/var/log/zulip/digest.log"),
("ANALYTICS_LOG_PATH", "/var/log/zulip/analytics.log"),
]
# The Event log basically logs most significant database changes,
# which can be useful for debugging.
if EVENT_LOGS_ENABLED:
ZULIP_PATHS.append(("EVENT_LOG_DIR", "/home/zulip/logs/event_log"))
else:
EVENT_LOG_DIR = None
for (var, path) in ZULIP_PATHS:
if DEVELOPMENT:
# if DEVELOPMENT, store these files in the Zulip checkout
path = os.path.join(DEVELOPMENT_LOG_DIRECTORY, os.path.basename(path))
        # JSON_PERSISTENT_QUEUE_FILENAME alone is stored under DEPLOY_ROOT/var/
if var == 'JSON_PERSISTENT_QUEUE_FILENAME':
path = os.path.join(os.path.join(DEPLOY_ROOT, 'var'), os.path.basename(path))
vars()[var] = path
ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'
if IS_WORKER:
FILE_LOG_PATH = WORKER_LOG_PATH
else:
FILE_LOG_PATH = SERVER_LOG_PATH
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)-8s %(message)s'
}
},
'filters': {
'ZulipLimiter': {
'()': 'zerver.lib.logging_util.ZulipLimiter',
},
'EmailLimiter': {
'()': 'zerver.lib.logging_util.EmailLimiter',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'nop': {
'()': 'zerver.lib.logging_util.ReturnTrue',
},
'require_really_deployed': {
'()': 'zerver.lib.logging_util.RequireReallyDeployed',
},
},
'handlers': {
'zulip_admins': {
'level': 'ERROR',
'class': 'zerver.logging_handlers.AdminZulipHandler',
            # For testing the handler, delete the next line
'filters': ['ZulipLimiter', 'require_debug_false', 'require_really_deployed'],
'formatter': 'default'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'default'
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': FILE_LOG_PATH,
},
'errors_file': {
'level': 'WARNING',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': ERROR_FILE_LOG_PATH,
},
},
'loggers': {
'': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'django': {
            'handlers': ((['zulip_admins'] if ERROR_REPORTING else []) +
                         ['console', 'file', 'errors_file']),
'level': 'INFO',
'propagate': False,
},
'zulip.requests': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'zulip.queue': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'WARNING',
'propagate': False,
},
'zulip.management': {
'handlers': ['file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'requests': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'WARNING',
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['file'],
'propagate': False,
},
## Uncomment the following to get all database queries logged to the console
# 'django.db': {
# 'handlers': ['console'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
}
}
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = '/'
# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety net. This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
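# (e.g., assuming the Tornado default of HEARTBEAT_MIN_FREQ_SECS = 45,
# anything above 55 * 1000 ms satisfies this; 90 * 1000 leaves slack).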
POLL_TIMEOUT = 90 * 1000
# iOS App IDs
ZULIP_IOS_APP_ID = 'com.zulip.Zulip'
DBX_IOS_APP_ID = 'com.dropbox.Zulip'
########################################################################
# SSO AND LDAP SETTINGS
########################################################################
USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)
if len(AUTHENTICATION_BACKENDS) == 1 and (AUTHENTICATION_BACKENDS[0] ==
"zproject.backends.ZulipRemoteUserBackend"):
HOME_NOT_LOGGED_IN = "/accounts/login/sso"
ONLY_SSO = True
else:
HOME_NOT_LOGGED_IN = '/login'
ONLY_SSO = False
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)
POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)
if POPULATE_PROFILE_VIA_LDAP and \
'zproject.backends.ZulipLDAPAuthBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
POPULATE_PROFILE_VIA_LDAP = 'zproject.backends.ZulipLDAPAuthBackend' in AUTHENTICATION_BACKENDS or POPULATE_PROFILE_VIA_LDAP
########################################################################
# GITHUB AUTHENTICATION SETTINGS
########################################################################
# SOCIAL_AUTH_GITHUB_KEY is set in /etc/zulip/settings.py
SOCIAL_AUTH_GITHUB_SECRET = get_secret('social_auth_github_secret')
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login/'
SOCIAL_AUTH_GITHUB_SCOPE = ['email']
SOCIAL_AUTH_GITHUB_ORG_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_ORG_SECRET = SOCIAL_AUTH_GITHUB_SECRET
SOCIAL_AUTH_GITHUB_TEAM_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_TEAM_SECRET = SOCIAL_AUTH_GITHUB_SECRET
########################################################################
# EMAIL SETTINGS
########################################################################
# If an email host is not specified, fail silently and gracefully
if not EMAIL_HOST and PRODUCTION:
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
elif DEVELOPMENT:
# In the dev environment, emails are printed to the run-dev.py console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_PASSWORD = get_secret('email_password')
if EMAIL_GATEWAY_PASSWORD is None:
EMAIL_GATEWAY_PASSWORD = get_secret('email_gateway_password')
if vars().get("AUTH_LDAP_BIND_PASSWORD") is None:
AUTH_LDAP_BIND_PASSWORD = get_secret('auth_ldap_bind_password')
# Set the sender email address for Django traceback error reporting
if SERVER_EMAIL is None:
SERVER_EMAIL = DEFAULT_FROM_EMAIL
########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
# Filter out user data
DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# This is a debugging option only
PROFILE_ALL_REQUESTS = False
CROSS_REALM_BOT_EMAILS = set(('[email protected]', '[email protected]'))
| apache-2.0 | -763,140,899,692,655,200 | 36.853994 | 128 | 0.555418 | false |
ecell/libmoleculizer | python-src/language_parser/moleculizer/moleculizerrules.py | 1 | 21379 | ###############################################################################
# Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute
# Original Author:
# Nathan Addy, Scientific Programmer Email: [email protected]
# The Molecular Sciences Institute
#
###############################################################################
import pdb
import re
import sys
import util
from xmlobject import XmlObject
import StringIO
from sectionparameter import SymbolicExpressionEvaluator
from sectionmodifications import ModificationsSection
from sectionmols import MolsSection
from sectionallostery import AllostericPlexesSection, AllostericOmnisSection
from sectionreactionrules import ReactionRulesSection
from sectionspeciesstreams import SpeciesStreamsSection
from sectionexplicitspeciesblock import ExplicitSpeciesSection
from moleculizer_xcpt import *
class MoleculizerRulesFile:
"""
This object acts as an parsing thing that outputs moleculizer files xml,
suitable for processing internally by a mzr::moleculizer instance."""
def BlockPassesSanityCheck( linearray ):
linearray = [x for x in linearray if x.strip() != ""]
if len(linearray) == 0: return True
        everyLineEndsWithSemiColon = [ x[-1] == ";" and x.count(";") == 1 for x in linearray]
noWhiteSpace = [ (x.count("\n") + x.count(" ") + x.count("\t") == 0) for x in linearray]
return reduce(util.And, everyLineEndsWithSemiColon) and reduce(util.And, noWhiteSpace)
BlockPassesSanityCheck = staticmethod( BlockPassesSanityCheck )
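    # Illustrative behavior (doctest-style sketch, not executed here):
    #   BlockPassesSanityCheck(["a=1;", "b=(2,3);"])  -> True
    #   BlockPassesSanityCheck(["a=1"])               -> False (missing ';')
    #   BlockPassesSanityCheck(["a=1 ;"])             -> False (whitespace)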
def addWholeRulesString( self, rulesString):
print "Reading file '%s' " % rulesString
lines = rulesString.split("\n")
parameterBlock, modificationsBlock, molsBlock, allostericPlexes, allostericOmnis,\
reactionRulesBlock, dimerizationGenBlock, omniGenBlock, \
explicitSpeciesBlock, speciesStreamBlock = parseBlockTypesFromRulesFile( lines )
self.addParameterBlock( parameterBlock )
self.addModicationsBlock( modificationsBlock )
self.addMolsBlock( molsBlock )
self.addAllostericPlexesBlock( allostericPlexes )
self.addAllostericOmnisBlock( allostericOmnis )
self.addReactionRulesBlock( reactionRulesBlock, dimerizationGenBlock, \
omniGenBlock, [] )
self.addExplicitSpeciesBlock( explicitSpeciesBlock )
self.addSpeciesStreamsBlock( speciesStreamBlock )
return
def addWholeRulesFile(self, rulesFile):
parameterBlock, modificationsBlock, molsBlock, allostericPlexes, allostericOmnis, \
reactionRulesBlock, dimerizationGenBlock, omniGenBlock, \
explicitSpeciesBlock, speciesStreamBlock = parseBlockTypesFromRulesFile( open(rulesFile).readlines() )
self.addParameterBlock( parameterBlock )
self.addModicationsBlock( modificationsBlock )
self.addMolsBlock( molsBlock )
self.addAllostericPlexesBlock( allostericPlexes )
self.addAllostericOmnisBlock( allostericOmnis )
self.addReactionRulesBlock( reactionRulesBlock, dimerizationGenBlock, \
omniGenBlock, [] )
self.addExplicitSpeciesBlock( explicitSpeciesBlock )
self.addSpeciesStreamsBlock( speciesStreamBlock )
return
def addParameterStatement(self, paramStatement):
paramStatement = self.PreProcessStatement( paramStatement )
print "Adding param line: '%s'" % paramStatement
self.parameterBlock.append( paramStatement)
self.parameterEE = SymbolicExpressionEvaluator( self.parameterBlock )
return
def addModificationStatement(self, modLine):
modLine = self.PreProcessStatement( modLine )
print "Adding mod line: '%s'" % modLine
self.modificationsBlock.append( modLine)
self.modificationsSection = ModificationsSection( self.modificationsBlock )
return
def addMolsStatement(self, molsLine):
molsLine = self.PreProcessStatement( molsLine )
self.molsBlock.append( molsLine )
        self.molsSection = MolsSection( self.molsBlock )
return
def addAllostericPlexStatement(self, alloPlexLine):
alloPlexLine = self.PreProcessStatement( alloPlexLine )
self.allostericPlexes.append( alloPlexLine )
self.allostericPlexesSection = AllostericPlexesSection( self.allostericPlexes )
return
def addAllostericOmniStatement(self, alloOmniLine):
alloOmniLine = self.PreProcessStatement( alloOmniLine )
self.allostericOmnis.append( alloOmniLine )
self.allostericOmnisSection = AllostericOmnisSection( self.allostericOmnis )
return
def addDimerizationGenStatement(self, dimerGenLine):
dimerGenLine = self.PreProcessStatement( dimerGenLine )
self.dimerizationGenBlock.append(dimerGenLine)
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
return
def addOmniGenStatement(self, omniGenLine):
omniGenLine = self.PreProcessStatement( omniGenLine )
        self.omniGenBlock.append( omniGenLine )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
return
def addUniMolGenStatement(self, uniMolGenLine):
uniMolGenLine = self.PreProcessStatement( uniMolGenLine )
self.uniMolGenBlock.append( uniMolGenLine )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
return
def addExplicitSpeciesStatement(self, explicitSpeciesStatement):
explicitSpeciesStatement = self.PreProcessStatement( explicitSpeciesStatement )
self.explicitSpeciesBlock.append( explicitSpeciesStatement )
self.explicitSpeciesSection = ExplicitSpeciesSection( self.explicitSpeciesBlock )
return
def addSpeciesStreamStatement(self, speciesStreamLine):
speciesStreamLine = self.PreProcessStatement( speciesStreamLine )
self.speciesStreamBlock.append( speciesStreamLine )
self.speciesStreamSection = SpeciesStreamsSection( self.speciesStreamBlock )
return
def __init__(self):
# These are the lines of input, in one statement per line form, with no whitespace
self.parameterBlock = []
self.modificationsBlock = []
self.molsBlock = []
self.allostericPlexes = []
self.allostericOmnis = []
self.reactionRulesBlock = []
self.dimerizationGenBlock = []
self.omniGenBlock = []
self.uniMolGenBlock = []
self.explicitSpeciesBlock = []
self.speciesStreamBlock = []
# These are the objects that will be used to process the parsed
# data.
        # A section is an intermediate between a rules file (it has lines and
        # can answer questions about what has been parsed) and an xml section
        # (it can write itself out as xml). The parameters section doesn't
        # write anything out currently, but easily could.
self.parameterSection = 0
self.modificationsSection = 0
self.molsSection = 0
self.allostericPlexesSection = 0
self.allostericOmnisSection = 0
self.reactionGensSection = 0
self.explicitSpeciesSection = 0
self.speciesStreamSection = 0
def getOutputFileName(self):
return self.outputFileName
def write(self):
self.openXmlFile = open(self.outputFileName, 'w')
self.__writeOutput(self.openXmlFile)
return
def writeToString(self):
myString = StringIO.StringIO()
self.__writeOutput( myString )
return myString.getvalue()
def close(self):
self.openXmlFile.close()
def addParameterBlock(self, parameterBlock, overwrite = False):
if self.parameterBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a parameter block twice.")
if not self.BlockPassesSanityCheck( parameterBlock ):
raise InsaneBlockOnTheLooseException(parameterBlock, "parameter block")
self.parameterBlock = parameterBlock[:]
self.parameterEE = SymbolicExpressionEvaluator( self.parameterBlock )
def addModicationsBlock(self, modificationsBlock, overwrite = False):
if self.modificationsBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a modifications block twice.")
if not self.BlockPassesSanityCheck( modificationsBlock ):
raise InsaneBlockOnTheLooseException(modificationsBlock, "modifications block")
self.modificationsBlock = modificationsBlock[:]
self.modificationsSection = ModificationsSection( self.modificationsBlock )
return
def addMolsBlock(self, molsBlock):
if self.molsBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a mols block twice.")
if not self.BlockPassesSanityCheck( molsBlock ):
raise InsaneBlockOnTheLooseException(molsBlock, "mols block")
self.molsBlock = molsBlock[:]
self.molsSection = MolsSection( molsBlock )
def addAllostericPlexesBlock(self, apBlock, overwrite = False):
if self.allostericPlexes and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add an allosteric plexes block twice.")
if not self.BlockPassesSanityCheck( apBlock ):
raise InsaneBlockOnTheLooseException(apBlock, "allosteric plexes block")
self.allostericPlexes = apBlock[:]
self.allostericPlexesSection = AllostericPlexesSection( self.allostericPlexes )
def addAllostericOmnisBlock(self, aoBlock, overwrite = False):
        if self.allostericOmnis and not overwrite:
            raise MzrExceptions.MoleculizerException("Error: Cannot add an allosteric omnis block twice.")
if not self.BlockPassesSanityCheck( aoBlock ):
raise InsaneBlockOnTheLooseException( aoBlock, "allosteric omnis block")
self.allostericOmnis = aoBlock[:]
self.allostericOmnisSection = AllostericOmnisSection( self.allostericOmnis )
def addReactionRulesBlock( self, rrBlock, dimerGenBlock, omniGenBlock, uniMolGenBlock, overwrite = False):
if self.reactionRulesBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a reaction rules block twice.")
if not self.BlockPassesSanityCheck( rrBlock ):
raise InsaneBlockOnTheLooseException(rrBlock, "reaction rules")
if not self.BlockPassesSanityCheck( dimerGenBlock ):
raise InsaneBlockOnTheLooseException(dimerGenBlock, "dimerization gen block")
if not self.BlockPassesSanityCheck( omniGenBlock ):
raise InsaneBlockOnTheLooseException(omniGenBlock, "omni-gen block")
if not self.BlockPassesSanityCheck( uniMolGenBlock ):
raise InsaneBlockOnTheLooseException(uniMolGenBlock, "uni-mol-gen block")
self.reactionRulesBlock.extend( rrBlock )
self.dimerizationGenBlock.extend( dimerGenBlock )
self.omniGenBlock.extend( omniGenBlock )
self.uniMolGenBlock.extend( uniMolGenBlock )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
def addExplicitSpeciesBlock( self, esBlock, overwrite = False):
if self.explicitSpeciesBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add an explicit species block twice.")
if not self.BlockPassesSanityCheck( esBlock ):
raise InsaneBlockOnTheLooseException(esBlock, "explicit-species")
self.explicitSpeciesBlock = esBlock[:]
self.explicitSpeciesSection = ExplicitSpeciesSection( esBlock )
def addSpeciesStreamsBlock(self, ssBlock, overwrite = False):
if self.speciesStreamBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a species stream block twice.")
if not self.BlockPassesSanityCheck( ssBlock ):
            raise InsaneBlockOnTheLooseException(ssBlock, "species streams block")
self.speciesStreamBlock = ssBlock[:]
self.speciesStreamSection = SpeciesStreamsSection( self.speciesStreamBlock )
def __processAllostericRulesBlocks( self, allostericPlexBlock, allostericOmniBlock):
return 0
def __processReactionRulesBlocks( self, rxnRulesBlock, dimerBlock, omniGenBlock, uniGenBlock):
return 0
def __processExplicitSpeciesBlock( self, explicitSpeciesBlock):
return 0
def __processSpeciesStreamBlock( self, ssBlock):
return 0
def __writeOutput(self, openXMLFile):
xmlobject = self.__constructXMLRepresentation()
xmlobject.writeall(openXMLFile)
def __constructXMLRepresentation(self):
rootNode = XmlObject("moleculizer-input")
modelElmt = XmlObject("model")
modelElmt.attachToParent(rootNode)
streamsElmt = XmlObject("streams", rootNode)
self.__addModifications( modelElmt )
self.__addMols( modelElmt )
self.__addAllostericPlexes( modelElmt )
self.__addAllostericOmnis( modelElmt )
self.__addReactionGens( modelElmt )
self.__addExplicitSpecies( modelElmt )
self.__addExplicitReactions( modelElmt )
self.__addSpeciesStreams( streamsElmt )
return rootNode
def __addModifications(self, parentObject):
# Write me!!!
modificationsSection = XmlObject("modifications", parentObject)
if self.modificationsSection:
self.modificationsSection.writeModificationsSections( modificationsSection )
return
def __addMols(self, parentObject):
molsSection = XmlObject("mols", parentObject)
if self.molsSection:
self.molsSection.writeMolsSection( molsSection)
return
def __addAllostericPlexes(self, parentObject):
allostericPlexes = XmlObject("allosteric-plexes", parentObject)
if self.allostericPlexesSection:
self.allostericPlexesSection.writeAllostericPlexesSection(allostericPlexes)
return
def __addAllostericOmnis(self, parentObject):
allostericOmnis = XmlObject("allosteric-omnis", parentObject)
if self.allostericOmnisSection:
self.allostericOmnisSection.writeAllostericOmnisSection( allostericOmnis )
return
def __addReactionGens(self, parentObject):
reactionGenElmt = XmlObject("reaction-gens", parentObject)
if self.reactionGensSection:
self.reactionGensSection.writeReactionGensSection( reactionGenElmt )
return
def __addSpeciesStreams( self, parentObject):
speciesStreamsElement = XmlObject("species-streams", parentObject)
if self.speciesStreamSection:
self.speciesStreamSection.writeSpeciesStreamSection( speciesStreamsElement )
def __addExplicitSpecies(self, parentObject):
explicitSpeciesElmt = XmlObject("explicit-species", parentObject)
if self.explicitSpeciesSection:
self.explicitSpeciesSection.writeExplicitSpeciesSection( explicitSpeciesElmt )
return
def __addExplicitReactions( self, modelElmt ):
explicitReactionsElmt = XmlObject("explicit-reactions", modelElmt)
return
def parseBlockTypesFromRulesFile(textRulesFile):
textRulesFile = [re.sub("#.*$", "", x) for x in textRulesFile] # Delete all comments
# textRulesFile = [re.sub("//.*$", "", x) for x in textRulesFile] # Delete all comments
textRulesFile = [re.sub(r"\s*", "", x) for x in textRulesFile] # Delete all whitespace
textRulesFile = [x.strip() for x in textRulesFile] # Strip it for good measure
textRulesFile = [x for x in textRulesFile if x != ""] # This must be last, because line.strip() results in some empty lines.
parameterBlock = []
modificationsBlock = []
molsBlock = []
allostericPlexes = []
allostericOmnis = []
reactionRulesBlock = []
dimerizationGenBlock = []
omniGenBlock = []
uniMolGenBlock = []
explicitSpeciesBlock = []
speciesStreamBlock = []
# textRulesFile = '\n'.join(textRulesFile)
# textRulesFile = re.sub(r"\\\s*\n\s*", " ", textRulesFile)
# textRulesFile = textRulesFile.split("\n")
blockCodes = ["Parameters", "Modifications", "Molecules", "Explicit-Allostery", "Allosteric-Classes",
"Reaction-Rules", "Association-Reactions", "Transformation-Reactions",
"Explicit-Species", "Species-Classes" ]
blockObjNdx = -1
blockDataObj = [ (blockCodes[0], parameterBlock), \
(blockCodes[1], modificationsBlock), \
(blockCodes[2], molsBlock), \
(blockCodes[3], allostericPlexes),
(blockCodes[4], allostericOmnis),
(blockCodes[5], reactionRulesBlock), \
(blockCodes[6], dimerizationGenBlock), \
(blockCodes[7], omniGenBlock), \
(blockCodes[8], explicitSpeciesBlock),\
(blockCodes[9], speciesStreamBlock) ]
currentDmp = []
try:
assert( textRulesFile[0].startswith("="))
except:
raise Exception("Line '%s' should start with a '=', but does not." % textRulesFile[0])
blockObjNdx = -1
for line in textRulesFile:
if line.startswith("="):
blockObjNdx = returnNewIndex(line, blockDataObj)
currentDmp = blockDataObj[blockObjNdx][1]
else:
currentDmp.append(line)
return getFormattedArray(parameterBlock), getFormattedArray(modificationsBlock), getFormattedArray(molsBlock), getFormattedArray(allostericPlexes), getFormattedArray(allostericOmnis), \
getFormattedArray(reactionRulesBlock), getFormattedArray(dimerizationGenBlock), getFormattedArray(omniGenBlock), \
getFormattedArray(explicitSpeciesBlock), getFormattedArray(speciesStreamBlock)
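# Illustrative use (hypothetical rules text; comments and whitespace are
# stripped before the lines are split on the '= Section =' headers):
#   lines = ["= Parameters =", "k_on = 1.0;", "= Molecules =", "A(site);"]
#   parseBlockTypesFromRulesFile(lines) returns ['k_on=1.0;'] as the
#   parameter block, ['A(site);'] as the mols block, and empty lists for
#   the remaining eight blocks.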
def returnNewIndex(lineOfText, blockObjData):
key = lineOfText.strip().strip("=").strip()
for ndx in range(len(blockObjData)):
if key == blockObjData[ndx][0]:
return ndx
raise Exception("Section title '%s' cannot be found" % key)
return -1
def barf(msg):
sys.stderr.write(msg + '\n')
sys.stderr.write("Crashing....\n")
sys.exit(1)
def printerror(msg):
sys.stderr.write(msg + '\n')
return
def getFormattedArray( arrayToFormat ):
tmpArray = getBalancedArray( arrayToFormat )
tmpString = "".join( tmpArray )
if tmpString == "":
return []
try:
assert( tmpString[-1] == ";" )
except:
raise Exception("Error parsing block '%s'. Line does not end in ';'." % repr(arrayToFormat))
tmpArray = tmpString.split(";")
tmpArray.pop() # Last entry is blank
tmpArray = [tok + ";" for tok in tmpArray]
return tmpArray
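# Illustrative behavior (hypothetical input): statements split across lines
# are rejoined until brackets balance, then re-split on ';':
#   getFormattedArray(['a=f(1,', '2);', 'b=2;'])  ->  ['a=f(1,2);', 'b=2;']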
def getBalancedArray( arrayToBalance ):
if not EachEntryIsParenBalanced( arrayToBalance ):
# Combine the ..., ndx_i, ndx_(i+1) where ndx_i is the smallest i not balanced
return getBalancedArray( GetIncrementallyBetterArray( arrayToBalance ) )
else:
return arrayToBalance
def GetIncrementallyBetterArray( anArray ):
values = [ StringIsParenBalenced(x) for x in anArray]
# This is correct: this function should only be used if the array does not pass
# EachEntryIsParenBalanced.
assert( False in values)
badNdx = values.index( False )
combinedTokens = anArray[badNdx] + anArray[badNdx + 1]
returnArray = anArray[ : badNdx]
returnArray.append( combinedTokens )
returnArray.extend( anArray[badNdx + 2 : ] )
return returnArray
def EachEntryIsParenBalanced( array ):
entries = [ StringIsParenBalenced(x) for x in array ]
returnVal = True
for val in entries:
returnVal = returnVal and val
return returnVal
def StringIsParenBalenced(line):
return ( line.count("(") == line.count(")") and
line.count("[") == line.count("]") and
line.count("{") == line.count("}") )
| gpl-2.0 | -8,921,513,987,681,306,000 | 36.245645 | 189 | 0.658263 | false |
mdunker/usergrid | utils/usergrid-util-python/usergrid_tools/queue/dlq-iterator-checker.py | 2 | 4809 | # */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
from multiprocessing.pool import Pool
import argparse
import json
import datetime
import os
import time
import sys
import boto
from boto import sqs
import requests
__author__ = '[email protected]'
sqs_conn = None
sqs_queue = None
# THIS WAS USED TO TAKE MESSAGES OUT OF THE DEAD LETTER AND TEST WHETHER THEY EXISTED OR NOT
def total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6) / 10.0 ** 6
def total_milliseconds(td):
return (td.microseconds + td.seconds * 1000000) / 1000
def get_time_remaining(count, rate):
if rate == 0:
return 'NaN'
seconds = count * 1.0 / rate
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def parse_args():
parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
parser.add_argument('-c', '--config',
help='The queue to load into',
type=str,
default='4g.json')
my_args = parser.parse_args(sys.argv[1:])
print str(my_args)
return vars(my_args)
def check_exists(sqs_message):
    # Checks whether an entity has been deleted; if the entity is still found,
    # it prints an error message. This was used when many messages were going
    # to the DLQ because the underlying entities had already been deleted.
try:
message = json.loads(sqs_message.get_body())
except ValueError:
print 'Unable to decode JSON: %s' % sqs_message.get_body()
return
try:
for event_name, event_data in message.iteritems():
entity_id_scope = event_data.get('entityIdScope')
app_id = entity_id_scope.get('applicationScope', {}).get('application', {}).get('uuid')
entity_id_scope = entity_id_scope.get('id')
entity_id = entity_id_scope.get('uuid')
entity_type = entity_id_scope.get('type')
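            # Note: the localhost URL built just below is immediately
            # overwritten by the external-host URL that follows it.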
url = 'http://localhost:8080/{app_id}/{entity_type}/{entity_id}'.format(
app_id=app_id,
entity_id=entity_id,
entity_type=entity_type
)
url = 'https://{host}/{basepath}/{app_id}/{entity_type}/{entity_id}'.format(
host='REPLACE',
basepath='REPLACE',
app_id=app_id,
entity_id=entity_id,
entity_type=entity_type
)
r = requests.get(url=url,
headers={
'Authorization': 'Bearer XCA'
})
if r.status_code != 404:
print 'ERROR/FOUND [%s]: %s' % (r.status_code, url)
else:
print '[%s]: %s' % (r.status_code, url)
deleted = sqs_conn.delete_message_from_handle(sqs_queue, sqs_message.receipt_handle)
if not deleted:
print 'no delete!'
except KeyboardInterrupt, e:
raise e
def main():
global sqs_conn, sqs_queue
args = parse_args()
start_time = datetime.datetime.utcnow()
first_start_time = start_time
print "first start: %s" % first_start_time
with open(args.get('config'), 'r') as f:
config = json.load(f)
sqs_config = config.get('sqs')
sqs_conn = boto.sqs.connect_to_region(**sqs_config)
queue_name = 'baas20sr_usea_baas20sr_usea_index_all_dead'
sqs_queue = sqs_conn.get_queue(queue_name)
last_size = sqs_queue.count()
print 'Last Size: ' + str(last_size)
pool = Pool(10)
keep_going = True
while keep_going:
sqs_messages = sqs_queue.get_messages(
num_messages=10,
visibility_timeout=10,
wait_time_seconds=10)
if len(sqs_messages) > 0:
pool.map(check_exists, sqs_messages)
else:
print 'DONE!'
pool.terminate()
keep_going = False
if __name__ == '__main__':
main()
| apache-2.0 | 4,807,315,767,958,435,000 | 28.685185 | 117 | 0.589728 | false |
nevermoreluo/privateoverseas | overseas/migrations/0001_initial.py | 1 | 3314 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-05 02:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('agid', models.PositiveIntegerField(unique=True)),
('name', models.CharField(max_length=100)),
('desc', models.CharField(max_length=200)),
('api_correlation_id', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Geo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metro', models.CharField(blank=True, max_length=100, null=True)),
('region', models.CharField(max_length=100)),
('requests', models.DecimalField(decimal_places=2, max_digits=20)),
('throughput', models.DecimalField(decimal_places=2, max_digits=20)),
('peak_throughput', models.DecimalField(decimal_places=2, max_digits=20)),
('bandwidth', models.DecimalField(decimal_places=2, max_digits=20)),
('peak_bandwidth', models.DecimalField(decimal_places=2, max_digits=20)),
('hit_rate', models.DecimalField(decimal_places=2, max_digits=20)),
('status_4XX', models.DecimalField(decimal_places=2, max_digits=20)),
('status_5XX', models.DecimalField(decimal_places=2, max_digits=20)),
('time', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='NetworkIdentifiers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ni', models.CharField(max_length=100, unique=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scid', models.CharField(max_length=50, unique=True)),
('active', models.BooleanField(default=True)),
('access_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.AccessGroup')),
],
),
migrations.AddField(
model_name='networkidentifiers',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.Service'),
),
migrations.AddField(
model_name='geo',
name='ni',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.NetworkIdentifiers'),
),
]
| gpl-3.0 | -9,114,533,386,148,599,000 | 44.39726 | 124 | 0.573929 | false |
zstackio/zstack-woodpecker | integrationtest/vm/mini/paths/path54.py | 1 | 2373 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", path_list=[
[TestAction.add_image, 'image1', 'root', os.environ.get('isoForVmUrl')],
[TestAction.create_vm_by_image, 'image1', 'iso', 'vm1'],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.detach_volume, 'volume1'],
[TestAction.create_mini_vm, 'vm2', 'memory=random'],
[TestAction.resize_data_volume, 'volume1', 5*1024*1024],
[TestAction.attach_volume, 'vm2', 'volume1'],
[TestAction.create_mini_vm, 'vm3', 'network=random'],
[TestAction.create_volume, 'volume2', 'flag=thin,scsi'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_volume_backup, 'volume1-backup1'],
[TestAction.start_vm, 'vm2'],
[TestAction.delete_image, 'image2'],
[TestAction.recover_image, 'image2'],
[TestAction.delete_image, 'image2'],
[TestAction.expunge_image, 'image2'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.create_volume, 'volume3', 'size=random', 'flag=scsi'],
[TestAction.delete_volume, 'volume3'],
[TestAction.stop_vm, 'vm3'],
[TestAction.add_image, 'image3', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.delete_volume, 'volume1'],
[TestAction.expunge_volume, 'volume1'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup3'],
[TestAction.resize_volume, 'vm3', 5*1024*1024],
[TestAction.delete_image, 'image1'],
[TestAction.delete_vm_backup, 'vm2-backup3'],
[TestAction.add_image, 'image4', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume4', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume2'],
])
'''
The final status:
Running:['vm1', 'vm2']
Stopped:['vm3']
Enadbled:['volume1-backup1', 'vm1-backup2', 'image3', 'image4']
attached:['volume2']
Detached:['volume4']
Deleted:['volume3', 'vm2-backup3', 'image1']
Expunged:['volume1', 'image2']
Ha:[]
Group:
vm_backup1:['vm1-backup2']---vm1_
'''
| apache-2.0 | -4,760,517,556,792,538,000 | 39.220339 | 104 | 0.686473 | false |
cournape/ensetuptools | setuptools/command/setopt.py | 1 | 4977 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind=='local':
return 'setup.cfg'
if kind=='global':
return os.path.join(
os.path.dirname(distutils.__file__),'distutils.cfg'
)
if kind=='user':
dot = os.name=='posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
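# Illustrative results (exact paths depend on the local installation):
#   config_file('local')  -> 'setup.cfg'
#   config_file('user')   -> expansion of '~/.pydistutils.cfg' on POSIX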
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
from ConfigParser import RawConfigParser
log.debug("Reading configuration from %s", filename)
opts = RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option,value in options.items():
if value is None:
log.debug("Deleting %s.%s from %s",
section, option, filename)
opts.remove_option(section,option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug("Setting %s.%s to %r in %s",
section, option, value, filename)
opts.set(section,option,value)
log.info("Writing %s", filename)
if not dry_run:
f = open(filename,'w'); opts.write(f); f.close()
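# Illustrative call (hypothetical option values): delete the [easy_install]
# section, set [bdist_rpm] release, and unset sdist's formats in setup.cfg:
#   edit_config('setup.cfg', {'easy_install': None,
#                             'bdist_rpm': {'release': '1'},
#                             'sdist': {'formats': None}})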
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames)>1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-','_'):self.set_value}
},
self.dry_run
)
| bsd-3-clause | 6,925,647,793,835,101,000 | 34.297872 | 79 | 0.57183 | false |
v6ak/qubes-core-admin | core/storage/__init__.py | 2 | 15124 | #!/usr/bin/python2
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013 Marek Marczykowski <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
from __future__ import absolute_import
import ConfigParser
import os
import os.path
import shutil
import subprocess
import sys
import qubes.qubesutils
from qubes.qubes import QubesException, defaults, system_path
CONFIG_FILE = '/etc/qubes/storage.conf'
class QubesVmStorage(object):
"""
Class for handling VM virtual disks. This is base class for all other
implementations, mostly with Xen on Linux in mind.
"""
def __init__(self, vm,
private_img_size = None,
root_img_size = None,
modules_img = None,
modules_img_rw = False):
self.vm = vm
self.vmdir = vm.dir_path
if private_img_size:
self.private_img_size = private_img_size
else:
self.private_img_size = defaults['private_img_size']
if root_img_size:
self.root_img_size = root_img_size
else:
self.root_img_size = defaults['root_img_size']
self.root_dev = "xvda"
self.private_dev = "xvdb"
self.volatile_dev = "xvdc"
self.modules_dev = "xvdd"
# For now compute this path still in QubesVm
self.modules_img = modules_img
self.modules_img_rw = modules_img_rw
# Additional drive (currently used only by HVM)
self.drive = None
def format_disk_dev(self, path, script, vdev, rw=True, type="disk",
domain=None):
if path is None:
return ''
template = " <disk type='block' device='{type}'>\n" \
" <driver name='phy'/>\n" \
" <source dev='{path}'/>\n" \
" <target dev='{vdev}' bus='xen'/>\n" \
"{params}" \
" </disk>\n"
params = ""
if not rw:
params += " <readonly/>\n"
if domain:
params += " <backenddomain name='%s'/>\n" % domain
if script:
params += " <script path='%s'/>\n" % script
return template.format(path=path, vdev=vdev, type=type, params=params)
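    # Illustrative output (hypothetical device path) for
    # format_disk_dev('/dev/mapper/vg-vm', None, 'xvdb'):
    #   <disk type='block' device='disk'>
    #     <driver name='phy'/>
    #     <source dev='/dev/mapper/vg-vm'/>
    #     <target dev='xvdb' bus='xen'/>
    #   </disk>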
def get_config_params(self):
args = {}
args['rootdev'] = self.root_dev_config()
args['privatedev'] = self.private_dev_config()
args['volatiledev'] = self.volatile_dev_config()
args['otherdevs'] = self.other_dev_config()
return args
def root_dev_config(self):
raise NotImplementedError
def private_dev_config(self):
raise NotImplementedError
def volatile_dev_config(self):
raise NotImplementedError
def other_dev_config(self):
if self.modules_img is not None:
return self.format_disk_dev(self.modules_img,
None,
self.modules_dev,
self.modules_img_rw)
elif self.drive is not None:
(drive_type, drive_domain, drive_path) = self.drive.split(":")
if drive_type == "hd":
drive_type = "disk"
writable = False
if drive_type == "disk":
writable = True
if drive_domain.lower() == "dom0":
drive_domain = None
return self.format_disk_dev(drive_path, None,
self.modules_dev,
rw=writable,
type=drive_type,
domain=drive_domain)
else:
return ''
def _copy_file(self, source, destination):
"""
Effective file copy, preserving sparse files etc.
"""
# TODO: Windows support
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", "--reflink=auto", source, destination])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(source, destination))
def get_disk_utilization(self):
return qubes.qubesutils.get_disk_usage(self.vmdir)
def get_disk_utilization_private_img(self):
return qubes.qubesutils.get_disk_usage(self.private_img)
def get_private_img_sz(self):
if not os.path.exists(self.private_img):
return 0
return os.path.getsize(self.private_img)
def resize_private_img(self, size):
raise NotImplementedError
def create_on_disk_private_img(self, verbose, source_template = None):
raise NotImplementedError
def create_on_disk_root_img(self, verbose, source_template = None):
raise NotImplementedError
def create_on_disk(self, verbose, source_template = None):
if source_template is None:
source_template = self.vm.template
old_umask = os.umask(002)
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.vmdir)
os.mkdir (self.vmdir)
self.create_on_disk_private_img(verbose, source_template)
self.create_on_disk_root_img(verbose, source_template)
self.reset_volatile_storage(verbose, source_template)
os.umask(old_umask)
def clone_disk_files(self, src_vm, verbose):
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.vmdir)
os.mkdir (self.vmdir)
if src_vm.private_img is not None and self.private_img is not None:
if verbose:
print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\
format(src_vm.private_img, self.private_img)
self._copy_file(src_vm.private_img, self.private_img)
if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None:
if verbose:
print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\
format(src_vm.root_img, self.root_img)
self._copy_file(src_vm.root_img, self.root_img)
# TODO: modules?
def rename(self, old_name, new_name):
old_vmdir = self.vmdir
new_vmdir = os.path.join(os.path.dirname(self.vmdir), new_name)
os.rename(self.vmdir, new_vmdir)
self.vmdir = new_vmdir
if self.private_img:
self.private_img = self.private_img.replace(old_vmdir, new_vmdir)
if self.root_img:
self.root_img = self.root_img.replace(old_vmdir, new_vmdir)
if self.volatile_img:
self.volatile_img = self.volatile_img.replace(old_vmdir, new_vmdir)
def verify_files(self):
if not os.path.exists (self.vmdir):
raise QubesException (
"VM directory doesn't exist: {0}".\
format(self.vmdir))
if self.root_img and not os.path.exists (self.root_img):
raise QubesException (
"VM root image file doesn't exist: {0}".\
format(self.root_img))
if self.private_img and not os.path.exists (self.private_img):
raise QubesException (
"VM private image file doesn't exist: {0}".\
format(self.private_img))
if self.modules_img is not None:
if not os.path.exists(self.modules_img):
raise QubesException (
"VM kernel modules image does not exists: {0}".\
format(self.modules_img))
def remove_from_disk(self):
shutil.rmtree (self.vmdir)
def reset_volatile_storage(self, verbose = False, source_template = None):
if source_template is None:
source_template = self.vm.template
# Re-create only for template based VMs
if source_template is not None and self.volatile_img:
if os.path.exists(self.volatile_img):
os.remove(self.volatile_img)
# For StandaloneVM create it only if not already exists (eg after backup-restore)
if self.volatile_img and not os.path.exists(self.volatile_img):
if verbose:
print >> sys.stderr, "--> Creating volatile image: {0}...".\
format(self.volatile_img)
subprocess.check_call([system_path["prepare_volatile_img_cmd"],
self.volatile_img, str(self.root_img_size / 1024 / 1024)])
def prepare_for_vm_startup(self, verbose):
self.reset_volatile_storage(verbose=verbose)
if self.private_img and not os.path.exists (self.private_img):
print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\
format(self.private_img)
self.create_on_disk_private_img(verbose=False)
def dump(o):
""" Returns a string represention of the given object
Args:
o (object): anything that response to `__module__` and `__class__`
Given the class :class:`qubes.storage.QubesVmStorage` it returns
'qubes.storage.QubesVmStorage' as string
"""
return o.__module__ + '.' + o.__class__.__name__
def load(string):
""" Given a dotted full module string representation of a class it loads it
Args:
string (str) i.e. 'qubes.storage.xen.QubesXenVmStorage'
Returns:
type
See also:
:func:`qubes.storage.dump`
"""
if not type(string) is str:
# This is a hack which allows giving a real class to a vm instead of a
# string as string_class parameter.
return string
components = string.split(".")
module_path = ".".join(components[:-1])
klass = components[-1:][0]
module = __import__(module_path, fromlist=[klass])
return getattr(module, klass)
def get_pool(name, vm):
""" Instantiates the storage for the specified vm """
config = _get_storage_config_parser()
klass = _get_pool_klass(name, config)
keys = [k for k in config.options(name) if k != 'driver' and k != 'class']
values = [config.get(name, o) for o in keys]
config_kwargs = dict(zip(keys, values))
if name == 'default':
        kwargs = defaults['pool_config'].copy()
        kwargs.update(config_kwargs)
else:
kwargs = config_kwargs
return klass(vm, **kwargs)
def pool_exists(name):
""" Check if the specified pool exists """
try:
_get_pool_klass(name)
return True
except StoragePoolException:
return False
def add_pool(name, **kwargs):
""" Add a storage pool to config."""
config = _get_storage_config_parser()
config.add_section(name)
for key, value in kwargs.iteritems():
config.set(name, key, value)
_write_config(config)
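# Illustrative call (hypothetical pool name and driver; valid driver names
# come from defaults['pool_drivers']):
#   add_pool('testpool', driver='xen', dir_path='/mnt/pools/testpool')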
def remove_pool(name):
""" Remove a storage pool from config file. """
config = _get_storage_config_parser()
config.remove_section(name)
_write_config(config)
def _write_config(config):
with open(CONFIG_FILE, 'w') as configfile:
config.write(configfile)
def _get_storage_config_parser():
""" Instantiates a `ConfigParaser` for specified storage config file.
Returns:
RawConfigParser
"""
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
return config
def _get_pool_klass(name, config=None):
""" Returns the storage klass for the specified pool.
Args:
name: The pool name.
config: If ``config`` is not specified
`_get_storage_config_parser()` is called.
Returns:
type: A class inheriting from `QubesVmStorage`
"""
if config is None:
config = _get_storage_config_parser()
if not config.has_section(name):
        raise StoragePoolException('Unknown storage pool ' + name)
if config.has_option(name, 'class'):
klass = load(config.get(name, 'class'))
elif config.has_option(name, 'driver'):
pool_driver = config.get(name, 'driver')
klass = defaults['pool_drivers'][pool_driver]
else:
        raise StoragePoolException('Unknown storage pool driver ' + name)
return klass
class StoragePoolException(QubesException):
pass
class Pool(object):
def __init__(self, vm, dir_path):
assert vm is not None
assert dir_path is not None
self.vm = vm
self.dir_path = dir_path
self.create_dir_if_not_exists(self.dir_path)
self.vmdir = self.vmdir_path(vm, self.dir_path)
appvms_path = os.path.join(self.dir_path, 'appvms')
self.create_dir_if_not_exists(appvms_path)
servicevms_path = os.path.join(self.dir_path, 'servicevms')
self.create_dir_if_not_exists(servicevms_path)
vm_templates_path = os.path.join(self.dir_path, 'vm-templates')
self.create_dir_if_not_exists(vm_templates_path)
def vmdir_path(self, vm, pool_dir):
""" Returns the path to vmdir depending on the type of the VM.
The default QubesOS file storage saves the vm images in three
different directories depending on the ``QubesVM`` type:
* ``appvms`` for ``QubesAppVm`` or ``QubesHvm``
* ``vm-templates`` for ``QubesTemplateVm`` or ``QubesTemplateHvm``
* ``servicevms`` for any subclass of ``QubesNetVm``
Args:
vm: a QubesVM
pool_dir: the root directory of the pool
Returns:
string (str) absolute path to the directory where the vm files
are stored
"""
if vm.is_appvm():
subdir = 'appvms'
elif vm.is_template():
subdir = 'vm-templates'
elif vm.is_netvm():
subdir = 'servicevms'
elif vm.is_disposablevm():
subdir = 'appvms'
return os.path.join(pool_dir, subdir, vm.template.name + '-dvm')
else:
raise QubesException(vm.type() + ' unknown vm type')
return os.path.join(pool_dir, subdir, vm.name)
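# Example paths (vm names assumed): with pool_dir '/var/lib/qubes', an AppVM
# named 'work' maps to '/var/lib/qubes/appvms/work', a template named
# 'fedora' to '/var/lib/qubes/vm-templates/fedora', and a DisposableVM based
# on template 'fedora' to '/var/lib/qubes/appvms/fedora-dvm'.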
def create_dir_if_not_exists(self, path):
""" Check if a directory exists in if not create it.
This method does not create any parent directories.
"""
if not os.path.exists(path):
os.mkdir(path)
| gpl-2.0 | 4,979,779,882,463,710,000 | 32.910314 | 91 | 0.583047 | false |
eloquence/unisubs | apps/teams/models.py | 1 | 128528 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from collections import defaultdict
from itertools import groupby
from math import ceil
import csv
import datetime
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.files import File
from django.db import models
from django.db.models import query, Q
from django.db.models.signals import post_save, post_delete, pre_delete
from django.http import Http404
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, ugettext
from haystack import site
from haystack.query import SQ
import teams.moderation_const as MODERATION
from caching import ModelCacheManager
from comments.models import Comment
from auth.models import UserLanguage, CustomUser as User
from auth.providers import get_authentication_provider
from messages import tasks as notifier
from subtitles import shims
from subtitles.signals import language_deleted
from teams.moderation_const import WAITING_MODERATION, UNMODERATED, APPROVED
from teams.permissions_const import (
TEAM_PERMISSIONS, PROJECT_PERMISSIONS, ROLE_OWNER, ROLE_ADMIN, ROLE_MANAGER,
ROLE_CONTRIBUTOR
)
from teams import tasks
from teams import workflows
from teams.signals import api_subtitles_approved, api_subtitles_rejected
from utils import DEFAULT_PROTOCOL
from utils import translation
from utils.amazon import S3EnabledImageField, S3EnabledFileField
from utils.panslugify import pan_slugify
from utils.searching import get_terms
from utils.text import fmt
from videos.models import (Video, VideoUrl, SubtitleVersion, SubtitleLanguage,
Action)
from videos.tasks import video_changed_tasks
from subtitles.models import (
SubtitleVersion as NewSubtitleVersion,
SubtitleLanguage as NewSubtitleLanguage,
SubtitleNoteBase,
ORIGIN_IMPORTED
)
from subtitles import pipeline
from functools import partial
logger = logging.getLogger(__name__)
celery_logger = logging.getLogger('celery.task')
BILLING_CUTOFF = getattr(settings, 'BILLING_CUTOFF', None)
# Teams
class TeamQuerySet(query.QuerySet):
def add_members_count(self):
"""Add _members_count field to this query
This can be used to order/filter the query and also avoids a query in
when Team.members_count() is called.
"""
select = {
'_members_count': (
'SELECT COUNT(1) '
'FROM teams_teammember tm '
'WHERE tm.team_id=teams_team.id'
)
}
return self.extra(select=select)
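# Usage sketch (not in the original source): the annotation can drive
# ordering without a per-team COUNT query, e.g.
#
#     Team.objects.all().add_members_count().order_by('-_members_count')
#
# and Team.members_count then reuses the precomputed _members_count value.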
def add_videos_count(self):
"""Add _videos_count field to this query
This can be used to order/filter the query and also avoids a query in
when Team.video_count() is called.
"""
select = {
'_videos_count': (
'SELECT COUNT(1) '
'FROM teams_teamvideo tv '
'WHERE tv.team_id=teams_team.id'
)
}
return self.extra(select=select)
def add_user_is_member(self, user):
"""Add user_is_member field to this query """
if not user.is_authenticated():
return self.extra(select={'user_is_member': 0})
select = {
'user_is_member': (
'EXISTS (SELECT 1 '
'FROM teams_teammember tm '
'WHERE tm.team_id=teams_team.id '
'AND tm.user_id=%s)'
)
}
return self.extra(select=select, select_params=[user.id])
class TeamManager(models.Manager):
def get_query_set(self):
"""Return a QS of all non-deleted teams."""
return TeamQuerySet(Team).filter(deleted=False)
def for_user(self, user, exclude_private=False):
"""Return the teams visible for the given user.
If exclude_private is True, then we will exclude private teams, even
if the user can apply to them.
"""
# policies where we should show the team, even if they're not visible
visible_policies = [Team.OPEN, Team.APPLICATION]
q = models.Q(is_visible=True)
if not exclude_private:
q |= models.Q(membership_policy__in=visible_policies)
if user.is_authenticated():
user_teams = TeamMember.objects.filter(user=user)
q |= models.Q(id__in=user_teams.values('team_id'))
return self.get_query_set().filter(q)
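# Illustrative behavior: for an anonymous user this reduces to visible teams
# plus open/by-application teams; for a logged-in user their own teams are
# always included, e.g.
#
#     Team.objects.for_user(request.user, exclude_private=True)
#
# returns only is_visible teams plus any team the user already belongs to.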
def with_recent_billing_record(self, day_range):
"""Find teams that have had a new video recently"""
start_date = (datetime.datetime.now() -
datetime.timedelta(days=day_range))
team_ids = list(BillingRecord.objects
.order_by()
.filter(created__gt=start_date)
.values_list('team_id', flat=True)
.distinct())
return Team.objects.filter(id__in=team_ids)
def needs_new_video_notification(self, notify_interval):
return (self.filter(
notify_interval=notify_interval,
teamvideo__created__gt=models.F('last_notification_time'))
.distinct())
class Team(models.Model):
APPLICATION = 1
INVITATION_BY_MANAGER = 2
INVITATION_BY_ALL = 3
OPEN = 4
INVITATION_BY_ADMIN = 5
MEMBERSHIP_POLICY_CHOICES = (
(OPEN, _(u'Open')),
(APPLICATION, _(u'Application')),
(INVITATION_BY_ALL, _(u'Invitation by any team member')),
(INVITATION_BY_MANAGER, _(u'Invitation by manager')),
(INVITATION_BY_ADMIN, _(u'Invitation by admin')),
)
VP_MEMBER = 1
VP_MANAGER = 2
VP_ADMIN = 3
VIDEO_POLICY_CHOICES = (
(VP_MEMBER, _(u'Any team member')),
(VP_MANAGER, _(u'Managers and admins')),
(VP_ADMIN, _(u'Admins only'))
)
TASK_ASSIGN_CHOICES = (
(10, 'Any team member'),
(20, 'Managers and admins'),
(30, 'Admins only'),
)
TASK_ASSIGN_NAMES = dict(TASK_ASSIGN_CHOICES)
TASK_ASSIGN_IDS = dict([choice[::-1] for choice in TASK_ASSIGN_CHOICES])
SUBTITLE_CHOICES = (
(10, 'Anyone'),
(20, 'Any team member'),
(30, 'Only managers and admins'),
(40, 'Only admins'),
)
SUBTITLE_NAMES = dict(SUBTITLE_CHOICES)
SUBTITLE_IDS = dict([choice[::-1] for choice in SUBTITLE_CHOICES])
NOTIFY_DAILY = 'D'
NOTIFY_HOURLY = 'H'
NOTIFY_INTERVAL_CHOICES = (
(NOTIFY_DAILY, _('Daily')),
(NOTIFY_HOURLY, _('Hourly')),
)
name = models.CharField(_(u'name'), max_length=250, unique=True)
slug = models.SlugField(_(u'slug'), unique=True)
description = models.TextField(_(u'description'), blank=True, help_text=_('All urls will be converted to links. Line breaks and HTML not supported.'))
logo = S3EnabledImageField(verbose_name=_(u'logo'), blank=True,
upload_to='teams/logo/',
default='',
thumb_sizes=[(280, 100), (100, 100)])
square_logo = S3EnabledImageField(verbose_name=_(u'square logo'),
blank=True,
default='',
upload_to='teams/square-logo/',
thumb_sizes=[(100, 100), (48, 48)])
is_visible = models.BooleanField(_(u'videos public?'), default=True)
videos = models.ManyToManyField(Video, through='TeamVideo', verbose_name=_('videos'))
users = models.ManyToManyField(User, through='TeamMember', related_name='teams', verbose_name=_('users'))
points = models.IntegerField(default=0, editable=False)
applicants = models.ManyToManyField(User, through='Application', related_name='applicated_teams', verbose_name=_('applicants'))
created = models.DateTimeField(auto_now_add=True)
highlight = models.BooleanField(default=False)
video = models.ForeignKey(Video, null=True, blank=True, related_name='intro_for_teams', verbose_name=_(u'Intro Video'))
application_text = models.TextField(blank=True)
page_content = models.TextField(_(u'Page content'), blank=True, help_text=_(u'You can use markdown. This will replace Description.'))
is_moderated = models.BooleanField(default=False)
header_html_text = models.TextField(blank=True, default='', help_text=_(u"HTML that appears at the top of the teams page."))
last_notification_time = models.DateTimeField(editable=False, default=datetime.datetime.now)
notify_interval = models.CharField(max_length=1,
choices=NOTIFY_INTERVAL_CHOICES,
default=NOTIFY_DAILY)
auth_provider_code = models.CharField(_(u'authentication provider code'),
max_length=24, blank=True, default="")
# code value from one the TeamWorkflow subclasses
# Since other apps can add workflow types, let's use this system to avoid
# conflicts:
# - Core types are defined in the teams app and 1 char long
# - Extention types are defined on other apps. They are 2 chars long,
# with the first one being unique to the app.
workflow_type = models.CharField(max_length=2, default='O')
# Enabling Features
projects_enabled = models.BooleanField(default=False)
# Deprecated field that enables the tasks workflow
workflow_enabled = models.BooleanField(default=False)
# Policies and Permissions
membership_policy = models.IntegerField(_(u'membership policy'),
choices=MEMBERSHIP_POLICY_CHOICES,
default=OPEN)
video_policy = models.IntegerField(_(u'video policy'),
choices=VIDEO_POLICY_CHOICES,
default=VP_MEMBER)
# The values below here are mostly specific to the tasks workflow and will
# probably be deleted.
task_assign_policy = models.IntegerField(_(u'task assignment policy'),
choices=TASK_ASSIGN_CHOICES,
default=TASK_ASSIGN_IDS['Any team member'])
subtitle_policy = models.IntegerField(_(u'subtitling policy'),
choices=SUBTITLE_CHOICES,
default=SUBTITLE_IDS['Anyone'])
translate_policy = models.IntegerField(_(u'translation policy'),
choices=SUBTITLE_CHOICES,
default=SUBTITLE_IDS['Anyone'])
max_tasks_per_member = models.PositiveIntegerField(_(u'maximum tasks per member'),
default=None, null=True, blank=True)
task_expiration = models.PositiveIntegerField(_(u'task expiration (days)'),
default=None, null=True, blank=True)
deleted = models.BooleanField(default=False)
partner = models.ForeignKey('Partner', null=True, blank=True,
related_name='teams')
objects = TeamManager()
all_objects = models.Manager() # For accessing deleted teams, if necessary.
cache = ModelCacheManager()
class Meta:
ordering = ['name']
verbose_name = _(u'Team')
verbose_name_plural = _(u'Teams')
def __init__(self, *args, **kwargs):
models.Model.__init__(self, *args, **kwargs)
self._member_cache = {}
def save(self, *args, **kwargs):
creating = self.pk is None
super(Team, self).save(*args, **kwargs)
self.cache.invalidate()
if creating:
# create a default project
self.default_project
# setup our workflow
self.new_workflow.setup_team()
def __unicode__(self):
return self.name or self.slug
def is_tasks_team(self):
return self.workflow_enabled
@property
def new_workflow(self):
if not hasattr(self, '_new_workflow'):
self._new_workflow = workflows.TeamWorkflow.get_workflow(self)
return self._new_workflow
def is_old_style(self):
return self.workflow_type == "O"
def get_tasks_page_url(self):
return reverse('teams:team_tasks', kwargs={
'slug': self.slug,
})
def languages(self, members_joined_since=None):
"""Returns the languages spoken by the member of the team
"""
if members_joined_since:
users = self.members_since(members_joined_since)
else:
users = self.users.all()
return UserLanguage.objects.filter(user__in=users).values_list('language', flat=True)
def active_users(self, since=None, published=True):
sv = NewSubtitleVersion.objects.filter(video__in=self.videos.all())
if published:
sv = sv.filter(Q(visibility_override='public') | Q(visibility='public'))
if since:
sv = sv.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=since))
return sv.exclude(author__username="anonymous").values_list('author', 'subtitle_language')
def get_default_message(self, name):
return fmt(Setting.MESSAGE_DEFAULTS.get(name, ''), team=self)
def get_messages(self, names):
"""Fetch messages from the settings objects
This method fetches the messages associated with names and interpolates
them to replace %(team)s with the team name.
Returns:
dict mapping names to message text
"""
messages = {
name: self.get_default_message(name)
for name in names
}
for setting in self.settings.with_names(names):
if setting.data:
messages[setting.key_name] = setting.data
return messages
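# Usage sketch ('messages_invite' is a hypothetical key; 'messages_application'
# appears elsewhere in this module):
#
#     team.get_messages(['messages_application', 'messages_invite'])
#
# returns a dict mapping each name to the team's stored Setting text when one
# exists, falling back to the team-interpolated MESSAGE_DEFAULTS entry.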
def render_message(self, msg):
"""Return a string of HTML represention a team header for a notification.
TODO: Get this out of the model and into a templatetag or something.
"""
author_page = msg.author.get_absolute_url() if msg.author else ''
context = {
'team': self,
'msg': msg,
'author': msg.author,
'author_page': author_page,
'team_page': self.get_absolute_url(),
"STATIC_URL": settings.STATIC_URL,
}
return render_to_string('teams/_team_message.html', context)
def is_open(self):
"""Return whether this team's membership is open to the public."""
return self.membership_policy == self.OPEN
def is_by_application(self):
"""Return whether this team's membership is by application only."""
return self.membership_policy == self.APPLICATION
def get_workflow(self):
"""Return the workflow for the given team.
A workflow will always be returned. If one isn't specified for the team
a default (unsaved) one will be populated with default values and
returned.
TODO: Refactor this behaviour into something less confusing.
"""
return Workflow.get_for_target(self.id, 'team')
@property
def auth_provider(self):
"""Return the authentication provider class for this Team, or None.
No DB queries are used, so this is safe to call many times.
"""
if not self.auth_provider_code:
return None
else:
return get_authentication_provider(self.auth_provider_code)
# Thumbnails
def logo_thumbnail(self):
"""URL for a kind-of small version of this team's logo, or None."""
if self.logo:
return self.logo.thumb_url(100, 100)
def logo_thumbnail_medium(self):
"""URL for a medium version of this team's logo, or None."""
if self.logo:
return self.logo.thumb_url(280, 100)
def square_logo_thumbnail(self):
"""URL for this team's square logo, or None."""
if self.square_logo:
return self.square_logo.thumb_url(100, 100)
def square_logo_thumbnail_small(self):
"""URL for a small version of this team's square logo, or None."""
if self.square_logo:
return self.square_logo.thumb_url(48, 48)
# URLs
@models.permalink
def get_absolute_url(self):
return ('teams:dashboard', [self.slug])
def get_site_url(self):
"""Return the full, absolute URL for this team, including http:// and the domain."""
return '%s://%s%s' % (DEFAULT_PROTOCOL,
Site.objects.get_current().domain,
self.get_absolute_url())
# Membership and roles
def get_member(self, user):
"""Get a TeamMember object for a user or None."""
if not user.is_authenticated():
return None
if user.id in self._member_cache:
return self._member_cache[user.id]
try:
member = self.members.get(user=user)
except TeamMember.DoesNotExist:
member = None
self._member_cache[user.id] = member
return member
def user_is_member(self, user):
members = self.cache.get('members')
if members is None:
members = list(self.members.values_list('user_id', flat=True))
self.cache.set('members', members)
return user.id in members
def uncache_member(self, user):
try:
del self._member_cache[user.id]
except KeyError:
pass
def user_can_view_videos(self, user):
return self.is_visible or self.user_is_member(user)
def _is_role(self, user, role=None):
"""Return whether the given user has the given role in this team.
Safe to use with null or unauthenticated users.
If no role is given, simply return whether the user is a member of this team at all.
TODO: Change this to use the stuff in teams.permissions.
"""
if not user or not user.is_authenticated():
return False
qs = self.members.filter(user=user)
if role:
qs = qs.filter(role=role)
return qs.exists()
def can_bulk_approve(self, user):
return self.is_owner(user) or self.is_admin(user)
def is_owner(self, user):
"""
Return whether the given user is an owner of this team.
"""
return self._is_role(user, TeamMember.ROLE_OWNER)
def is_admin(self, user):
"""Return whether the given user is an admin of this team."""
return self._is_role(user, TeamMember.ROLE_ADMIN)
def is_manager(self, user):
"""Return whether the given user is a manager of this team."""
return self._is_role(user, TeamMember.ROLE_MANAGER)
def is_member(self, user):
"""Return whether the given user is a member of this team."""
return self._is_role(user)
def is_contributor(self, user, authenticated=True):
"""Return whether the given user is a contributor of this team, False otherwise."""
return self._is_role(user, TeamMember.ROLE_CONTRIBUTOR)
def can_see_video(self, user, team_video=None):
"""I have no idea.
TODO: Figure out what this thing is, and if it's still necessary.
"""
if not user.is_authenticated():
return False
return self.is_member(user)
def fetch_video_actions(self, video_language=None):
"""Fetch the Action objects for this team's videos
Args:
video_language: only actions for videos with this
primary_audio_language_code
"""
video_q = TeamVideo.objects.filter(team=self).values_list('video_id')
if video_language is not None:
video_q = video_q.filter(
video__primary_audio_language_code=video_language)
return Action.objects.filter(video_id__in=video_q)
# Moderation
def moderates_videos(self):
"""Return whether this team moderates videos in some way, False otherwise.
Moderation means the team restricts who can create subtitles and/or
translations.
"""
if self.subtitle_policy != Team.SUBTITLE_IDS['Anyone']:
return True
if self.translate_policy != Team.SUBTITLE_IDS['Anyone']:
return True
return False
def video_is_moderated_by_team(self, video):
"""Return whether this team moderates the given video."""
return video.moderated_by == self
# Item counts
@property
def members_count(self):
"""Return the number of members of this team.
Caches the result in-object for performance.
"""
if not hasattr(self, '_members_count'):
setattr(self, '_members_count', self.users.count())
return self._members_count
def members_count_since(self, joined_since):
"""Return the number of members of this team who joined the last n days.
"""
return self.users.filter(date_joined__gt=datetime.datetime.now() - datetime.timedelta(days=joined_since)).count()
def members_since(self, joined_since):
""" Returns the members who joined the team the last n days
"""
return self.users.filter(date_joined__gt=datetime.datetime.now() - datetime.timedelta(days=joined_since))
@property
def videos_count(self):
"""Return the number of videos of this team.
Caches the result in-object for performance.
"""
if not hasattr(self, '_videos_count'):
setattr(self, '_videos_count', self.teamvideo_set.count())
return self._videos_count
def videos_count_since(self, added_since=None):
"""Return the number of videos of this team added in the last n days.
"""
return self.teamvideo_set.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=added_since)).count()
def videos_since(self, added_since):
"""Returns the videos of this team added the last n days.
"""
return self.videos.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=added_since))
def unassigned_tasks(self, sort=None):
qs = Task.objects.filter(team=self, deleted=False, completed=None, assignee=None, type=Task.TYPE_IDS['Approve'])
if sort is not None:
qs = qs.order_by(sort)
return qs
def get_task(self, task_pk):
return Task.objects.get(pk=task_pk)
def get_tasks(self, task_pks):
return Task.objects.filter(pk__in=task_pks).select_related('new_subtitle_version', 'new_subtitle_version__subtitle_language', 'team_video', 'team_video__video', 'team_video__video__teamvideo', 'workflow')
def _count_tasks(self):
qs = Task.objects.filter(team=self, deleted=False, completed=None)
# quick check: are there more than 1000 tasks? If so, return 1001 and
# let the UI display > 1000
if qs[1000:1001].exists():
return 1001
else:
return qs.count()
@property
def tasks_count(self):
"""Return the number of incomplete, undeleted tasks of this team.
Caches the result in-object for performance.
Note: the count is capped at 1001 tasks. If a team has more than
that, we generally just want to display "> 1000". Use
get_tasks_count_display() to do that.
"""
if not hasattr(self, '_tasks_count'):
setattr(self, '_tasks_count', self._count_tasks())
return self._tasks_count
def get_tasks_count_display(self):
"""Get a string to display for our tasks count."""
if self.tasks_count <= 1000:
return unicode(self.tasks_count)
else:
return ugettext('> 1000')
# Applications (people applying to join)
def application_message(self):
"""Return the membership application message for this team, or '' if none exists."""
try:
return self.settings.get(key=Setting.KEY_IDS['messages_application']).data
except Setting.DoesNotExist:
return ''
@property
def applications_count(self):
"""Return the number of open membership applications to this team.
Caches the result in-object for performance.
"""
if not hasattr(self, '_applications_count'):
setattr(self, '_applications_count', self.applications.count())
return self._applications_count
# Language pairs
def _lang_pair(self, lp, suffix):
return SQ(content="{0}_{1}_{2}".format(lp[0], lp[1], suffix))
def get_videos_for_languages_haystack(self, language=None,
num_completed_langs=None,
project=None, user=None, query=None,
sort=None, exclude_language=None):
qs = self.get_videos_for_user(user)
if project:
qs = qs.filter(project_pk=project.pk)
if query:
for term in get_terms(query):
qs = qs.auto_query(qs.query.clean(term).decode('utf-8'))
if language:
qs = qs.filter(video_completed_langs=language)
if exclude_language:
qs = qs.exclude(video_completed_langs=exclude_language)
if num_completed_langs is not None:
qs = qs.filter(num_completed_langs=num_completed_langs)
qs = qs.order_by({
'name': 'video_title_exact',
'-name': '-video_title_exact',
'subs': 'num_completed_langs',
'-subs': '-num_completed_langs',
'time': 'team_video_create_date',
'-time': '-team_video_create_date',
}.get(sort or '-time'))
return qs
def get_videos_for_user(self, user):
from teams.search_indexes import TeamVideoLanguagesIndex
is_member = (user and user.is_authenticated()
and self.members.filter(user=user).exists())
if is_member:
return TeamVideoLanguagesIndex.results_for_members(self).filter(team_id=self.id)
else:
return TeamVideoLanguagesIndex.results().filter(team_id=self.id)
# Projects
@property
def default_project(self):
"""Return the default project for this team.
If it doesn't already exist it will be created.
TODO: Move the creation into a signal on the team to avoid creating
multiple default projects here?
"""
try:
return Project.objects.get(team=self, slug=Project.DEFAULT_NAME)
except Project.DoesNotExist:
p = Project(team=self, name=Project.DEFAULT_NAME)
p.save()
return p
@property
def has_projects(self):
"""Return whether this team has projects other than the default one."""
return self.project_set.count() > 1
# Readable/writeable language codes
def get_writable_langs(self):
"""Return a list of language code strings that are writable for this team.
This value may come from memcache.
"""
return TeamLanguagePreference.objects.get_writable(self)
def get_readable_langs(self):
"""Return a list of language code strings that are readable for this team.
This value may come from memcache.
"""
return TeamLanguagePreference.objects.get_readable(self)
def get_team_languages(self, since=None):
query_sl = NewSubtitleLanguage.objects.filter(video__in=self.videos.all())
new_languages = []
if since:
query_sl = query_sl.filter(id__in=NewSubtitleVersion.objects.filter(video__in=self.videos.all(),
created__gt=datetime.datetime.now() - datetime.timedelta(days=since)).order_by('subtitle_language').values_list('subtitle_language', flat=True).distinct())
new_languages = list(NewSubtitleLanguage.objects.filter(video__in=self.videos_since(since)).values_list('language_code', 'subtitles_complete'))
query_sl = query_sl.values_list('language_code', 'subtitles_complete')
languages = list(query_sl)
def first_member(x):
return x[0]
complete_languages = map(first_member, filter(lambda x: x[1], languages))
incomplete_languages = map(first_member, filter(lambda x: not x[1], languages))
new_languages = map(first_member, new_languages)
if since:
return (complete_languages, incomplete_languages, new_languages)
else:
return (complete_languages, incomplete_languages)
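# Return-shape sketch (language codes are assumed example values): without
# `since` the call yields (['en', 'fr'], ['de']) -- complete vs. incomplete
# codes; with `since` set, a third list is appended holding the codes of
# languages on videos added within the last `since` days.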
# This needs to be constructed after the model definition since we need a
# reference to the class itself.
Team._meta.permissions = TEAM_PERMISSIONS
# Project
class ProjectManager(models.Manager):
def for_team(self, team_identifier):
"""Return all non-default projects for the given team with the given identifier.
The team_identifier passed may be an actual Team object, or a string
containing a team slug, or the primary key of a team as an integer.
"""
if hasattr(team_identifier, "pk"):
team = team_identifier
elif isinstance(team_identifier, int):
team = Team.objects.get(pk=team_identifier)
elif isinstance(team_identifier, str):
team = Team.objects.get(slug=team_identifier)
return Project.objects.filter(team=team).exclude(name=Project.DEFAULT_NAME)
class Project(models.Model):
# All tvs belong to a project, whether the team has enabled them or not
# the default project is just a convenience UI that pretends to be part of
# the team. If this ever gets changed, you need to change migrations/0044
DEFAULT_NAME = "_root"
team = models.ForeignKey(Team)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(blank=True)
name = models.CharField(max_length=255, null=False)
description = models.TextField(blank=True, null=True, max_length=2048)
guidelines = models.TextField(blank=True, null=True, max_length=2048)
slug = models.SlugField(blank=True)
order = models.PositiveIntegerField(default=0)
workflow_enabled = models.BooleanField(default=False)
objects = ProjectManager()
def __unicode__(self):
if self.is_default_project:
return u"---------"
return u"%s" % (self.name)
def display(self, default_project_label=None):
if self.is_default_project and default_project_label is not None:
return default_project_label
return self.__unicode__()
def save(self, slug=None, *args, **kwargs):
self.modified = datetime.datetime.now()
if slug is not None:
self.slug = pan_slugify(slug)
elif not self.slug:
self.slug = pan_slugify(self.name)
super(Project, self).save(*args, **kwargs)
@property
def is_default_project(self):
"""Return whether this project is a default project for a team."""
return self.name == Project.DEFAULT_NAME
def get_site_url(self):
"""Return the full, absolute URL for this project, including http:// and the domain."""
return '%s://%s%s' % (DEFAULT_PROTOCOL, Site.objects.get_current().domain, self.get_absolute_url())
@models.permalink
def get_absolute_url(self):
return ('teams:project_video_list', [self.team.slug, self.slug])
@property
def videos_count(self):
"""Return the number of videos in this project.
Caches the result in-object for performance.
"""
if not hasattr(self, '_videos_count'):
setattr(self, '_videos_count', TeamVideo.objects.filter(project=self).count())
return self._videos_count
def _count_tasks(self):
qs = Task.objects.filter(team=self.team, deleted=False, completed=None,
                         team_video__project=self)
# quick check: are there more than 1000 tasks? If so, return 1001 and
# let the UI display > 1000
if qs[1000:1001].exists():
return 1001
else:
return qs.count()
@property
def tasks_count(self):
"""Return the number of incomplete, undeleted tasks in this project.
Caches the result in-object for performance.
"""
if not hasattr(self, '_tasks_count'):
setattr(self, '_tasks_count', self._count_tasks())
return self._tasks_count
class Meta:
unique_together = (
("team", "name",),
("team", "slug",),
)
permissions = PROJECT_PERMISSIONS
# TeamVideo
class TeamVideo(models.Model):
THUMBNAIL_SIZE = (288, 162)
team = models.ForeignKey(Team)
video = models.OneToOneField(Video)
description = models.TextField(blank=True,
help_text=_(u'Use this space to explain why you or your team need to '
u'caption or subtitle this video. Adding a note makes '
u'volunteers more likely to help out!'))
thumbnail = S3EnabledImageField(upload_to='teams/video_thumbnails/', null=True, blank=True,
help_text=_(u'We automatically grab thumbnails for certain sites, e.g. Youtube'),
thumb_sizes=(THUMBNAIL_SIZE, (120,90),))
all_languages = models.BooleanField(_('Need help with all languages'), default=False,
help_text=_(u'If you check this, other languages will not be displayed.'))
added_by = models.ForeignKey(User, null=True)
# this is an auto_add like field, but done on the model save so the
# admin doesn't throw a fit
created = models.DateTimeField(blank=True)
partner_id = models.CharField(max_length=100, blank=True, default="")
project = models.ForeignKey(Project)
class Meta:
unique_together = (('team', 'video'),)
def __unicode__(self):
return unicode(self.video)
@models.permalink
def get_absolute_url(self):
return ('teams:team_video', [self.pk])
def get_tasks_page_url(self):
return "%s?team_video=%s" % (self.team.get_tasks_page_url(), self.pk)
def get_thumbnail(self):
if self.thumbnail:
return self.thumbnail.thumb_url(*TeamVideo.THUMBNAIL_SIZE)
video_thumb = self.video.get_thumbnail(fallback=False)
if video_thumb:
return video_thumb
return "%simages/video-no-thumbnail-medium.png" % settings.STATIC_URL
def _original_language(self):
if not hasattr(self, 'original_language_code'):
sub_lang = self.video.subtitle_language()
setattr(self, 'original_language_code', None if not sub_lang else sub_lang.language)
return getattr(self, 'original_language_code')
def save(self, *args, **kwargs):
if not hasattr(self, "project"):
self.project = self.team.default_project
assert self.project.team == self.team, \
"%s: Team (%s) is not equal to project's (%s) team (%s)"\
% (self, self.team, self.project, self.project.team)
if not self.pk:
self.created = datetime.datetime.now()
self.video.cache.invalidate()
self.video.clear_team_video_cache()
super(TeamVideo, self).save(*args, **kwargs)
def is_checked_out(self, ignore_user=None):
'''Return whether this video is checked out in a task.
If a user is given, checkouts by that user will be ignored. This
provides a way to ask "can user X check out or work on this task?".
This is similar to the writelocking done on Videos and
SubtitleLanguages.
'''
tasks = self.task_set.filter(
# Find all tasks for this video which:
deleted=False, # - Aren't deleted
assignee__isnull=False, # - Are assigned to someone
language="", # - Aren't specific to a language
completed__isnull=True, # - Are unfinished
)
if ignore_user:
tasks = tasks.exclude(assignee=ignore_user)
return tasks.exists()
# Convenience functions
def subtitles_started(self):
"""Return whether subtitles have been started for this video."""
from subtitles.models import SubtitleLanguage
return (SubtitleLanguage.objects.having_nonempty_versions()
.filter(video=self.video)
.exists())
def subtitles_finished(self):
"""Return whether at least one set of subtitles has been finished for this video."""
qs = (self.video.newsubtitlelanguage_set.having_public_versions()
.filter(subtitles_complete=True))
for lang in qs:
if lang.is_synced():
return True
return False
def get_workflow(self):
"""Return the appropriate Workflow for this TeamVideo."""
return Workflow.get_for_team_video(self)
def move_to(self, new_team, project=None):
"""
Moves this TeamVideo to a new team.
This method expects you to have run the correct permissions checks.
"""
old_team = self.team
if old_team == new_team and project == self.project:
return
within_team = (old_team == new_team)
# these imports are here to avoid circular imports, hacky
from teams.signals import api_teamvideo_new
from teams.signals import video_moved_from_team_to_team
from videos import metadata_manager
# For now, we'll just delete any tasks associated with the moved video.
if not within_team:
self.task_set.update(deleted=True)
# We move the video by just switching the team, instead of deleting and
# recreating it.
self.team = new_team
# projects are always team dependent:
if project:
self.project = project
else:
self.project = new_team.default_project
self.save()
if not within_team:
# We need to make any as-yet-unmoderated versions public.
# TODO: Dedupe this and the team video delete signal.
video = self.video
video.newsubtitleversion_set.extant().update(visibility='public')
video.is_public = new_team.is_visible
video.moderated_by = new_team if new_team.moderates_videos() else None
video.save()
TeamVideoMigration.objects.create(from_team=old_team,
to_team=new_team,
to_project=self.project)
# Update search data and other things
video_changed_tasks.delay(self.video.pk)
# Create any necessary tasks.
autocreate_tasks(self)
# fire a http notification that a new video has hit this team:
api_teamvideo_new.send(self)
video_moved_from_team_to_team.send(sender=self,
destination_team=new_team, video=self.video)
def get_task_for_editor(self, language_code):
if not hasattr(self, '_editor_task'):
self._editor_task = self._get_task_for_editor(language_code)
return self._editor_task
def _get_task_for_editor(self, language_code):
task_set = self.task_set.incomplete().filter(language=language_code)
# 2533: We can get 2 review tasks if we include translate/transcribe
# tasks in the results. This is because when we have a task id and
# the user clicks endorse, we do the following:
# - save the subtitles
# - save the task, setting subtitle_version to the version that we
# just saved
#
# However, the task code creates a task on both of those steps. I'm not
# sure exactly what the old editor does to make this not happen, but
# it's safest to just not send task_id in that case
task_set = task_set.filter(type__in=(Task.TYPE_IDS['Review'],
Task.TYPE_IDS['Approve']))
# This assumes there is only 1 incomplete task at a time, hopefully
# that's a good enough assumption to hold until we dump tasks for the
# collab model.
tasks = list(task_set[:1])
if tasks:
return tasks[0]
else:
return None
@staticmethod
def get_videos_non_language_ids(team, language_code, non_empty_language_code=False):
if non_empty_language_code:
return TeamVideo.objects.filter(
team=team).exclude(
video__primary_audio_language_code__gt=language_code).values_list('id', flat=True)
return TeamVideo.objects.filter(
team=team).exclude(
video__primary_audio_language_code=language_code).values_list('id', flat=True)
class TeamVideoMigration(models.Model):
from_team = models.ForeignKey(Team, related_name='+')
to_team = models.ForeignKey(Team, related_name='+')
to_project = models.ForeignKey(Project, related_name='+')
datetime = models.DateTimeField()
def __init__(self, *args, **kwargs):
if 'datetime' not in kwargs:
kwargs['datetime'] = self.now()
models.Model.__init__(self, *args, **kwargs)
@staticmethod
def now():
# Make now a function so we can patch it in the unittests
return datetime.datetime.now()
def _create_translation_tasks(team_video, subtitle_version=None):
"""Create any translation tasks that should be autocreated for this video.
subtitle_version should be the original SubtitleVersion that these tasks
will probably be translating from.
"""
preferred_langs = TeamLanguagePreference.objects.get_preferred(team_video.team)
for lang in preferred_langs:
# Don't create tasks for languages that are already complete.
sl = team_video.video.subtitle_language(lang)
if sl and sl.is_complete_and_synced():
continue
# Don't create tasks for languages that already have one. This includes
# review/approve tasks and such.
# Doesn't matter if it's complete or not.
task_exists = Task.objects.not_deleted().filter(
team=team_video.team, team_video=team_video, language=lang
).exists()
if task_exists:
continue
# Otherwise, go ahead and create it.
task = Task(team=team_video.team, team_video=team_video,
language=lang, type=Task.TYPE_IDS['Translate'])
# we should only update the team video after all tasks for
# this video are saved, else we end up with a lot of
# wasted tasks
task.save(update_team_video_index=False)
tasks.update_one_team_video.delay(team_video.pk)
def autocreate_tasks(team_video):
workflow = Workflow.get_for_team_video(team_video)
existing_subtitles = team_video.video.completed_subtitle_languages(public_only=True)
# We may need to create a transcribe task, if there are no existing subs.
if workflow.autocreate_subtitle and not existing_subtitles:
if not team_video.task_set.not_deleted().exists():
original_language = team_video.video.primary_audio_language_code
Task(team=team_video.team,
team_video=team_video,
subtitle_version=None,
language= original_language or '',
type=Task.TYPE_IDS['Subtitle']
).save()
# If there are existing subtitles, we may need to create translate tasks.
#
# TODO: This sets the "source version" for the translations to an arbitrary
# language's version. In practice this probably won't be a problem
# because most teams will transcribe one language and then send to a
# new team for translation, but we can probably be smarter about this
# if we spend some time.
if workflow.autocreate_translate and existing_subtitles:
_create_translation_tasks(team_video)
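# Worked example (team setup assumed, not from the original source): a team
# whose workflow has autocreate_subtitle=True gets one Subtitle task, in the
# video's primary audio language, when a video with no completed public
# subtitles is added; once completed subtitles exist and
# autocreate_translate=True, _create_translation_tasks() adds one Translate
# task per preferred team language that is still missing.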
def team_video_save(sender, instance, created, **kwargs):
"""Update the Solr index for this team video.
TODO: Rename this to something more specific.
"""
tasks.update_one_team_video.delay(instance.id)
def team_video_delete(sender, instance, **kwargs):
"""Perform necessary actions for when a TeamVideo is deleted.
TODO: Split this up into separate signals.
"""
from videos import metadata_manager
# not using an async task for this since the async task
# could easily execute way after the instance is gone,
# and backend.remove requires the instance.
tv_search_index = site.get_index(TeamVideo)
tv_search_index.backend.remove(instance)
try:
video = instance.video
# we need to publish all unpublished subs for this video:
NewSubtitleVersion.objects.filter(video=video,
visibility='private').update(visibility='public')
video.is_public = True
video.moderated_by = None
video.save()
metadata_manager.update_metadata(video.pk)
video.update_search_index()
except Video.DoesNotExist:
pass
if instance.video_id is not None:
Video.cache.invalidate_by_pk(instance.video_id)
def on_language_deleted(sender, **kwargs):
"""When a language is deleted, delete all tasks associated with it."""
team_video = sender.video.get_team_video()
if not team_video:
return
Task.objects.filter(team_video=team_video,
language=sender.language_code).delete()
# check if there are no more source languages for the video, and in that
# case delete all transcribe tasks. Don't delete:
# - transcribe tasks that have already been started
# - review tasks
# - approve tasks
if not sender.video.has_public_version():
# filtering on new_subtitle_version=None excludes all 3 cases where we
# don't want to delete tasks
Task.objects.filter(team_video=team_video,
new_subtitle_version=None).delete()
def team_video_autocreate_task(sender, instance, created, raw, **kwargs):
"""Create subtitle/translation tasks for a newly added TeamVideo, if necessary."""
if created and not raw:
autocreate_tasks(instance)
def team_video_add_video_moderation(sender, instance, created, raw, **kwargs):
"""Set the .moderated_by attribute on a newly created TeamVideo's Video, if necessary."""
if created and not raw and instance.team.moderates_videos():
instance.video.moderated_by = instance.team
instance.video.save()
def team_video_rm_video_moderation(sender, instance, **kwargs):
"""Clear the .moderated_by attribute on a newly deleted TeamVideo's Video, if necessary."""
try:
# when removing a video, this will be triggered by the fk constraint
# and the video will already be removed
instance.video.moderated_by = None
instance.video.save()
except Video.DoesNotExist:
pass
post_save.connect(team_video_save, TeamVideo, dispatch_uid="teams.teamvideo.team_video_save")
post_save.connect(team_video_autocreate_task, TeamVideo, dispatch_uid='teams.teamvideo.team_video_autocreate_task')
post_save.connect(team_video_add_video_moderation, TeamVideo, dispatch_uid='teams.teamvideo.team_video_add_video_moderation')
post_delete.connect(team_video_delete, TeamVideo, dispatch_uid="teams.teamvideo.team_video_delete")
post_delete.connect(team_video_rm_video_moderation, TeamVideo, dispatch_uid="teams.teamvideo.team_video_rm_video_moderation")
language_deleted.connect(on_language_deleted, dispatch_uid="teams.subtitlelanguage.language_deleted")
# TeamMember
class TeamMemberManager(models.Manager):
use_for_related_fields = True
def create_first_member(self, team, user):
"""Make sure that new teams always have an 'owner' member."""
tm = TeamMember(team=team, user=user, role=ROLE_OWNER)
tm.save()
return tm
def admins(self):
return self.filter(role__in=(ROLE_OWNER, ROLE_ADMIN))
class TeamMember(models.Model):
ROLE_OWNER = ROLE_OWNER
ROLE_ADMIN = ROLE_ADMIN
ROLE_MANAGER = ROLE_MANAGER
ROLE_CONTRIBUTOR = ROLE_CONTRIBUTOR
ROLES = (
(ROLE_OWNER, _("Owner")),
(ROLE_MANAGER, _("Manager")),
(ROLE_ADMIN, _("Admin")),
(ROLE_CONTRIBUTOR, _("Contributor")),
)
team = models.ForeignKey(Team, related_name='members')
user = models.ForeignKey(User, related_name='team_members')
role = models.CharField(max_length=16, default=ROLE_CONTRIBUTOR, choices=ROLES, db_index=True)
created = models.DateTimeField(default=datetime.datetime.now, null=True,
blank=True)
objects = TeamMemberManager()
def __unicode__(self):
return u'%s' % self.user
def save(self, *args, **kwargs):
super(TeamMember, self).save(*args, **kwargs)
Team.cache.invalidate_by_pk(self.team_id)
def delete(self):
super(TeamMember, self).delete()
Team.cache.invalidate_by_pk(self.team_id)
def project_narrowings(self):
"""Return any project narrowings applied to this member."""
return self.narrowings.filter(project__isnull=False)
def language_narrowings(self):
"""Return any language narrowings applied to this member."""
return self.narrowings.filter(project__isnull=True)
def project_narrowings_fast(self):
"""Return any project narrowings applied to this member.
Caches the result in-object for speed.
"""
return [n for n in self.narrowings_fast() if n.project]
def language_narrowings_fast(self):
"""Return any language narrowings applied to this member.
Caches the result in-object for speed.
"""
return [n for n in self.narrowings_fast() if n.language]
def narrowings_fast(self):
"""Return any narrowings (both project and language) applied to this member.
Caches the result in-object for speed.
"""
if hasattr(self, '_cached_narrowings'):
if self._cached_narrowings is not None:
return self._cached_narrowings
self._cached_narrowings = self.narrowings.all()
return self._cached_narrowings
def has_max_tasks(self):
"""Return whether this member has the maximum number of tasks."""
max_tasks = self.team.max_tasks_per_member
if max_tasks:
if self.user.task_set.incomplete().filter(team=self.team).count() >= max_tasks:
return True
return False
def is_manager(self):
"""Test if the user is a manager or above."""
return self.role in (ROLE_OWNER, ROLE_ADMIN, ROLE_MANAGER)
def is_admin(self):
"""Test if the user is an admin or owner."""
return self.role in (ROLE_OWNER, ROLE_ADMIN)
class Meta:
unique_together = (('team', 'user'),)
def clear_tasks(sender, instance, *args, **kwargs):
"""Unassign all tasks assigned to a user.
Used when deleting a user from a team.
"""
tasks = instance.team.task_set.incomplete().filter(assignee=instance.user)
tasks.update(assignee=None)
pre_delete.connect(clear_tasks, TeamMember, dispatch_uid='teams.members.clear-tasks-on-delete')
# MembershipNarrowing
class MembershipNarrowing(models.Model):
"""Represent narrowings that can be made on memberships.
A single MembershipNarrowing can apply to a project or a language, but not both.
"""
member = models.ForeignKey(TeamMember, related_name="narrowings")
project = models.ForeignKey(Project, null=True, blank=True)
language = models.CharField(max_length=24, blank=True,
choices=translation.ALL_LANGUAGE_CHOICES)
added_by = models.ForeignKey(TeamMember, related_name="narrowing_includer", null=True, blank=True)
created = models.DateTimeField(auto_now_add=True, blank=True)
modified = models.DateTimeField(auto_now=True, blank=True)
def __unicode__(self):
if self.project:
return u"Permission restriction for %s to project %s " % (self.member, self.project)
else:
return u"Permission restriction for %s to language %s " % (self.member, self.language)
def save(self, *args, **kwargs):
# Cannot have duplicate narrowings for a language.
if self.language:
duplicate_exists = MembershipNarrowing.objects.filter(
member=self.member, language=self.language
).exclude(id=self.id).exists()
assert not duplicate_exists, "Duplicate language narrowing detected!"
# Cannot have duplicate narrowings for a project.
if self.project:
duplicate_exists = MembershipNarrowing.objects.filter(
member=self.member, project=self.project
).exclude(id=self.id).exists()
assert not duplicate_exists, "Duplicate project narrowing detected!"
super(MembershipNarrowing, self).save(*args, **kwargs)
Team.cache.invalidate_by_pk(self.member.team_id)
def delete(self):
super(MembershipNarrowing, self).delete()
Team.cache.invalidate_by_pk(self.member.team_id)
class TeamSubtitleNote(SubtitleNoteBase):
team = models.ForeignKey(Team, related_name='+')
class ApplicationInvalidException(Exception):
pass
class ApplicationManager(models.Manager):
def can_apply(self, team, user):
"""
A user can apply only if they are not yet a member of the team, the
team hasn't said no to the user (either by denying an application or by
removing them), and no application is currently pending.
"""
sour_application_exists = self.filter(team=team, user=user, status__in=[
Application.STATUS_MEMBER_REMOVED, Application.STATUS_DENIED,
Application.STATUS_PENDING]).exists()
if sour_application_exists:
return False
return not team.is_member(user)
def open(self, team=None, user=None):
qs = self.filter(status=Application.STATUS_PENDING)
if team:
qs = qs.filter(team=team)
if user:
qs = qs.filter(user=user)
return qs
# Application
class Application(models.Model):
team = models.ForeignKey(Team, related_name='applications')
user = models.ForeignKey(User, related_name='team_applications')
note = models.TextField(blank=True)
# Application status is an integer from STATUSES below; an application
# starts out STATUS_PENDING and is updated as it is acted upon.
STATUS_PENDING, STATUS_APPROVED, STATUS_DENIED, STATUS_MEMBER_REMOVED,\
STATUS_MEMBER_LEFT = xrange(0, 5)
STATUSES = (
(STATUS_PENDING, u"Pending"),
(STATUS_APPROVED, u"Approved"),
(STATUS_DENIED, u"Denied"),
(STATUS_MEMBER_REMOVED, u"Member Removed"),
(STATUS_MEMBER_LEFT, u"Member Left"),
)
STATUSES_IDS = dict([choice[::-1] for choice in STATUSES])
status = models.PositiveIntegerField(default=STATUS_PENDING, choices=STATUSES)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(blank=True, null=True)
# free text keeping a log of changes to this application
history = models.TextField(blank=True, null=True)
objects = ApplicationManager()
class Meta:
unique_together = (('team', 'user', 'status'),)
def approve(self, author, interface):
"""Approve the application.
This will create an appropriate TeamMember if this application has
not been already acted upon
"""
if self.status not in (Application.STATUS_PENDING, Application.STATUS_MEMBER_LEFT):
raise ApplicationInvalidException("")
member, created = TeamMember.objects.get_or_create(team=self.team, user=self.user)
if created:
notifier.team_member_new.delay(member.pk)
self.modified = datetime.datetime.now()
self.status = Application.STATUS_APPROVED
self.save(author=author, interface=interface)
return self
def deny(self, author, interface):
"""
Marks the application as not approved, then queues a Celery task
that will handle properly denying this application.
"""
if self.status != Application.STATUS_PENDING:
raise ApplicationInvalidException("")
self.modified = datetime.datetime.now()
self.status = Application.STATUS_DENIED
self.save(author=author, interface=interface)
notifier.team_application_denied.delay(self.pk)
return self
def on_member_leave(self, author, interface):
"""
Marks the appropriate status, but users can still
reapply to a team if they so desire later.
"""
self.status = Application.STATUS_MEMBER_LEFT
self.save(author=author, interface=interface)
def on_member_removed(self, author, interface):
"""
Marks the appropriate status so that users cannot reapply
to a team after being removed.
"""
self.status = Application.STATUS_MEMBER_REMOVED
self.save(author=author, interface=interface)
def _generate_history_line(self, new_status, author=None, interface=None):
author = author or "?"
interface = interface or "web UI"
new_status = new_status if new_status is not None else Application.STATUS_PENDING
status = None
for value, name in Application.STATUSES:
    if value == new_status:
        status = name
assert status is not None
return u"%s by %s from %s (%s)\n" % (status, author, interface, datetime.datetime.now())
def save(self, dispatches_http_callback=True, author=None, interface=None, *args, **kwargs):
"""
Saves the model, but also appends a line to the history for that
model, like these:
- CoolGuy Approved through the web UI.
- Arthur Left team through the web UI.
This way, we can keep one application per user per team, never
delete them (so the messages stay current) and we still can
track history
"""
self.history = (self.history or "") + self._generate_history_line(self.status, author, interface)
super(Application, self).save(*args, **kwargs)
if dispatches_http_callback:
from teams.signals import api_application_new
api_application_new.send(self)
def __unicode__(self):
return "Application: %s - %s - %s" % (self.team.slug, self.user.username, self.get_status_display())
# Invites
class InviteExpiredException(Exception):
pass
class InviteManager(models.Manager):
def pending_for(self, team, user):
return self.filter(team=team, user=user, approved=None)
def acted_on(self, team, user):
return self.filter(team=team, user=user, approved__isnull=False)
class Invite(models.Model):
team = models.ForeignKey(Team, related_name='invitations')
user = models.ForeignKey(User, related_name='team_invitations')
note = models.TextField(blank=True, max_length=200)
author = models.ForeignKey(User)
role = models.CharField(max_length=16, choices=TeamMember.ROLES,
default=TeamMember.ROLE_CONTRIBUTOR)
# None -> not acted upon
# True -> Approved
# False -> Rejected
approved = models.NullBooleanField(default=None)
objects = InviteManager()
def accept(self):
"""Accept this invitation.
Creates an appropriate TeamMember record, sends a notification and
deletes itself.
"""
if self.approved is not None:
raise InviteExpiredException("")
self.approved = True
member, created = TeamMember.objects.get_or_create(
team=self.team, user=self.user, role=self.role)
if created:
notifier.team_member_new.delay(member.pk)
self.save()
return True
def deny(self):
"""Deny this invitation.
Could be useful to send a notification here in the future.
"""
if self.approved is not None:
raise InviteExpiredException("")
self.approved = False
self.save()
def message_json_data(self, data, msg):
data['can-reply'] = False
return data
# Workflows
class Workflow(models.Model):
REVIEW_CHOICES = (
(00, "Don't require review"),
(10, 'Peer must review'),
(20, 'Manager must review'),
(30, 'Admin must review'),
)
REVIEW_NAMES = dict(REVIEW_CHOICES)
REVIEW_IDS = dict([choice[::-1] for choice in REVIEW_CHOICES])
APPROVE_CHOICES = (
(00, "Don't require approval"),
(10, 'Manager must approve'),
(20, 'Admin must approve'),
)
APPROVE_NAMES = dict(APPROVE_CHOICES)
APPROVE_IDS = dict([choice[::-1] for choice in APPROVE_CHOICES])
team = models.ForeignKey(Team)
project = models.ForeignKey(Project, blank=True, null=True)
team_video = models.ForeignKey(TeamVideo, blank=True, null=True)
autocreate_subtitle = models.BooleanField(default=False)
autocreate_translate = models.BooleanField(default=False)
review_allowed = models.PositiveIntegerField(
choices=REVIEW_CHOICES, verbose_name='reviewers', default=0)
approve_allowed = models.PositiveIntegerField(
choices=APPROVE_CHOICES, verbose_name='approvers', default=0)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
class Meta:
unique_together = ('team', 'project', 'team_video')
@classmethod
def _get_target_team(cls, id, type):
"""Return the team for the given target.
The target is identified by id (its PK as an integer) and type (a string
of 'team_video', 'project', or 'team').
"""
if type == 'team_video':
return TeamVideo.objects.select_related('team').get(pk=id).team
elif type == 'project':
return Project.objects.select_related('team').get(pk=id).team
else:
return Team.objects.get(pk=id)
@classmethod
def get_for_target(cls, id, type, workflows=None):
'''Return the most specific Workflow for the given target.
If target object does not exist, None is returned.
If workflows is given, it should be a QS or List of all Workflows for
the TeamVideo's team. This will let you look it up yourself once and
use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
'''
if not workflows:
team = Workflow._get_target_team(id, type)
workflows = list(Workflow.objects.filter(team=team.id)
.select_related('project', 'team',
'team_video'))
else:
team = workflows[0].team
default_workflow = Workflow(team=team)
if not workflows:
return default_workflow
if type == 'team_video':
try:
return [w for w in workflows
if w.team_video and w.team_video.id == id][0]
except IndexError:
# If there's no video-specific workflow for this video, there
# might be a workflow for its project, so we'll start looking
# for that instead.
team_video = TeamVideo.objects.get(pk=id)
id, type = team_video.project_id, 'project'
if type == 'project':
try:
return [w for w in workflows
if w.project and w.project.workflow_enabled
and w.project.id == id and not w.team_video][0]
except IndexError:
# If there's no project-specific workflow for this project,
# there might be one for its team, so we'll fall through.
pass
if not team.workflow_enabled:
return default_workflow
return [w for w in workflows
if (not w.project) and (not w.team_video)][0]
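# Lookup precedence, illustrated: a Workflow row bound to the exact TeamVideo
# wins; failing that, one bound to the video's project (when that project has
# workflow_enabled); failing that, the team-wide row -- and if the team has
# workflow_enabled=False, an unsaved default Workflow(team=team) is returned.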
@classmethod
def get_for_team_video(cls, team_video, workflows=None):
'''Return the most specific Workflow for the given team_video.
If workflows is given, it should be a QuerySet or List of all Workflows
for the TeamVideo's team. This will let you look it up yourself once
and use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
NOTE: This function caches the workflow for performance reasons. If the
workflow changes within the space of a single request, the
_cached_workflow attribute should be cleared.
'''
if not hasattr(team_video, '_cached_workflow'):
team_video._cached_workflow = Workflow.get_for_target(
team_video.id, 'team_video', workflows)
return team_video._cached_workflow
@classmethod
def get_for_project(cls, project, workflows=None):
'''Return the most specific Workflow for the given project.
If workflows is given, it should be a QuerySet or List of all Workflows
for the Project's team. This will let you look it up yourself once
and use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
'''
return Workflow.get_for_target(project.id, 'project', workflows)
@classmethod
def add_to_team_videos(cls, team_videos):
'''Add the appropriate Workflow objects to each TeamVideo as .workflow.
This will only perform one DB query, and it will add the most specific
workflow possible to each TeamVideo.
This only exists for performance reasons.
'''
if not team_videos:
return []
workflows = list(Workflow.objects.filter(team=team_videos[0].team))
for tv in team_videos:
tv.workflow = Workflow.get_for_team_video(tv, workflows)
def get_specific_target(self):
"""Return the most specific target that this workflow applies to."""
return self.team_video or self.project or self.team
def __unicode__(self):
target = self.get_specific_target()
return u'Workflow %s for %s (%s %d)' % (
self.pk, target, target.__class__.__name__, target.pk)
# Convenience functions for checking if a step of the workflow is enabled.
@property
def review_enabled(self):
"""Return whether any form of review is enabled for this workflow."""
        return bool(self.review_allowed)
@property
def approve_enabled(self):
"""Return whether any form of approval is enabled for this workflow."""
        return bool(self.approve_allowed)
@property
def requires_review_or_approval(self):
"""Return whether a given workflow requires review or approval."""
return self.approve_enabled or self.review_enabled
@property
def requires_tasks(self):
"""Return whether a given workflow requires the use of tasks."""
return (self.requires_review_or_approval or self.autocreate_subtitle
or self.autocreate_translate)
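# Illustrative sketch (not part of the original module): how the Workflow
# helpers above fit together. `team` and `team_videos` are assumed to come
# from the caller; nothing here runs at import time.
def _sketch_workflow_lookup(team, team_videos):
    # Fetch every workflow for the team once...
    workflows = list(Workflow.objects.filter(team=team.id)
                             .select_related('project', 'team', 'team_video'))
    for tv in team_videos:
        # ...then resolve the most specific workflow per video with no
        # further queries (video > project > team > default).
        wf = Workflow.get_for_team_video(tv, workflows)
        if wf.requires_tasks:
            pass  # e.g. create subtitle/translate tasks here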
# Tasks
class TaskManager(models.Manager):
def not_deleted(self):
"""Return a QS of tasks that are not deleted."""
return self.get_query_set().filter(deleted=False)
def incomplete(self):
"""Return a QS of tasks that are not deleted or completed."""
return self.not_deleted().filter(completed=None)
def complete(self):
"""Return a QS of tasks that are not deleted, but are completed."""
return self.not_deleted().filter(completed__isnull=False)
def _type(self, types, completed=None, approved=None):
"""Return a QS of tasks that are not deleted and are one of the given types.
types should be a list of strings matching a label in Task.TYPE_CHOICES.
completed should be one of:
* True (only show completed tasks)
* False (only show incomplete tasks)
* None (don't filter on completion status)
approved should be either None or a string matching a label in
Task.APPROVED_CHOICES.
"""
type_ids = [Task.TYPE_IDS[type] for type in types]
qs = self.not_deleted().filter(type__in=type_ids)
        if completed is False:
            qs = qs.filter(completed=None)
        elif completed is True:
            qs = qs.filter(completed__isnull=False)
if approved:
qs = qs.filter(approved=Task.APPROVED_IDS[approved])
return qs
def incomplete_subtitle(self):
"""Return a QS of subtitle tasks that are not deleted or completed."""
return self._type(['Subtitle'], False)
def incomplete_translate(self):
"""Return a QS of translate tasks that are not deleted or completed."""
return self._type(['Translate'], False)
def incomplete_review(self):
"""Return a QS of review tasks that are not deleted or completed."""
return self._type(['Review'], False)
def incomplete_approve(self):
"""Return a QS of approve tasks that are not deleted or completed."""
return self._type(['Approve'], False)
def incomplete_subtitle_or_translate(self):
"""Return a QS of subtitle or translate tasks that are not deleted or completed."""
return self._type(['Subtitle', 'Translate'], False)
def incomplete_review_or_approve(self):
"""Return a QS of review or approve tasks that are not deleted or completed."""
return self._type(['Review', 'Approve'], False)
def complete_subtitle(self):
"""Return a QS of subtitle tasks that are not deleted, but are completed."""
return self._type(['Subtitle'], True)
def complete_translate(self):
"""Return a QS of translate tasks that are not deleted, but are completed."""
return self._type(['Translate'], True)
def complete_review(self, approved=None):
"""Return a QS of review tasks that are not deleted, but are completed.
If approved is given the tasks are further filtered on their .approved
attribute. It must be a string matching one of the labels in
Task.APPROVED_CHOICES, like 'Rejected'.
"""
return self._type(['Review'], True, approved)
def complete_approve(self, approved=None):
"""Return a QS of approve tasks that are not deleted, but are completed.
If approved is given the tasks are further filtered on their .approved
attribute. It must be a string matching one of the labels in
Task.APPROVED_CHOICES, like 'Rejected'.
"""
return self._type(['Approve'], True, approved)
def complete_subtitle_or_translate(self):
"""Return a QS of subtitle or translate tasks that are not deleted, but are completed."""
return self._type(['Subtitle', 'Translate'], True)
def complete_review_or_approve(self, approved=None):
"""Return a QS of review or approve tasks that are not deleted, but are completed.
If approved is given the tasks are further filtered on their .approved
attribute. It must be a string matching one of the labels in
Task.APPROVED_CHOICES, like 'Rejected'.
"""
return self._type(['Review', 'Approve'], True, approved)
def all_subtitle(self):
"""Return a QS of subtitle tasks that are not deleted."""
return self._type(['Subtitle'])
def all_translate(self):
"""Return a QS of translate tasks that are not deleted."""
return self._type(['Translate'])
def all_review(self):
"""Return a QS of review tasks that are not deleted."""
return self._type(['Review'])
def all_approve(self):
"""Return a QS of tasks that are not deleted."""
return self._type(['Approve'])
def all_subtitle_or_translate(self):
"""Return a QS of subtitle or translate tasks that are not deleted."""
return self._type(['Subtitle', 'Translate'])
def all_review_or_approve(self):
"""Return a QS of review or approve tasks that are not deleted."""
return self._type(['Review', 'Approve'])
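# Illustrative sketch (not part of the original module): typical queries
# built on the TaskManager helpers above. `team_video` is assumed to exist,
# and 'Rejected' must be a label from Task.APPROVED_CHOICES.
def _sketch_task_queries(team_video):
    pending_reviews = (Task.objects.incomplete_review()
                                   .filter(team_video=team_video))
    rejected_approvals = Task.objects.complete_approve(approved='Rejected')
    return pending_reviews, rejected_approvals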
class Task(models.Model):
TYPE_CHOICES = (
(10, 'Subtitle'),
(20, 'Translate'),
(30, 'Review'),
(40, 'Approve'),
)
TYPE_NAMES = dict(TYPE_CHOICES)
TYPE_IDS = dict([choice[::-1] for choice in TYPE_CHOICES])
APPROVED_CHOICES = (
(10, 'In Progress'),
(20, 'Approved'),
(30, 'Rejected'),
)
APPROVED_NAMES = dict(APPROVED_CHOICES)
APPROVED_IDS = dict([choice[::-1] for choice in APPROVED_CHOICES])
APPROVED_FINISHED_IDS = (20, 30)
type = models.PositiveIntegerField(choices=TYPE_CHOICES)
team = models.ForeignKey(Team)
team_video = models.ForeignKey(TeamVideo)
language = models.CharField(max_length=16,
choices=translation.ALL_LANGUAGE_CHOICES,
blank=True, db_index=True)
assignee = models.ForeignKey(User, blank=True, null=True)
subtitle_version = models.ForeignKey(SubtitleVersion, blank=True, null=True)
new_subtitle_version = models.ForeignKey(NewSubtitleVersion,
blank=True, null=True)
# The original source version being reviewed or approved.
#
# For example, if person A creates two versions while working on a subtitle
# task:
#
# v1 v2
# --o---o
# s s
#
# and then the reviewer and approver make some edits
#
# v1 v2 v3 v4 v5
# --o---o---o---o---o
# s s r r a
# *
#
# the review_base_version will be v2. Once approved, if an edit is made it
# needs to be approved as well, and the same thing happens:
#
# v1 v2 v3 v4 v5 v6 v7
# --o---o---o---o---o---o---o
# s s r r a e a
# *
#
# This is used when rejecting versions, and may be used elsewhere in the
# future as well.
review_base_version = models.ForeignKey(SubtitleVersion, blank=True,
null=True,
related_name='tasks_based_on')
new_review_base_version = models.ForeignKey(NewSubtitleVersion, blank=True,
null=True,
related_name='tasks_based_on_new')
deleted = models.BooleanField(default=False)
# TODO: Remove this field.
public = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
completed = models.DateTimeField(blank=True, null=True)
expiration_date = models.DateTimeField(blank=True, null=True)
# Arbitrary priority for tasks. Some teams might calculate this
# on complex criteria and expect us to be able to sort tasks on it.
# Higher numbers mean higher priority
priority = models.PositiveIntegerField(blank=True, default=0, db_index=True)
# Review and Approval -specific fields
approved = models.PositiveIntegerField(choices=APPROVED_CHOICES,
null=True, blank=True)
body = models.TextField(blank=True, default="")
objects = TaskManager()
def __unicode__(self):
return u'Task %s (%s) for %s' % (self.id or "unsaved",
self.get_type_display(),
self.team_video)
@property
def summary(self):
"""
Return a brief summary of the task
"""
output = unicode(self.team_video)
if self.body:
output += unicode(self.body.split('\n',1)[0].strip()[:20])
return output
@staticmethod
def now():
"""datetime.datetime.now as a method
This lets us patch it in the unittests.
"""
return datetime.datetime.now()
def is_subtitle_task(self):
return self.type == Task.TYPE_IDS['Subtitle']
def is_translate_task(self):
return self.type == Task.TYPE_IDS['Translate']
def is_review_task(self):
return self.type == Task.TYPE_IDS['Review']
def is_approve_task(self):
return self.type == Task.TYPE_IDS['Approve']
@property
def workflow(self):
'''Return the most specific workflow for this task's TeamVideo.'''
return Workflow.get_for_team_video(self.team_video)
@staticmethod
def add_cached_video_urls(tasks):
"""Add the cached_video_url attribute to a list of atkss
cached_video_url is the URL as a string for the video.
"""
team_video_pks = [t.team_video_id for t in tasks]
video_urls = (VideoUrl.objects
.filter(video__teamvideo__id__in=team_video_pks)
.filter(primary=True))
video_url_map = dict((vu.video_id, vu.effective_url)
for vu in video_urls)
for t in tasks:
t.cached_video_url = video_url_map.get(t.team_video.video_id)
def _add_comment(self, lang_ct=None):
"""Add a comment on the SubtitleLanguage for this task with the body as content."""
if self.body.strip():
if lang_ct is None:
lang_ct = ContentType.objects.get_for_model(NewSubtitleLanguage)
comment = Comment(
content=self.body,
object_pk=self.new_subtitle_version.subtitle_language.pk,
content_type=lang_ct,
submit_date=self.completed,
user=self.assignee,
)
comment.save()
notifier.send_video_comment_notification.delay(
comment.pk, version_pk=self.new_subtitle_version.pk)
def future(self):
"""Return whether this task expires in the future."""
return self.expiration_date > self.now()
# Functions related to task completion.
def _send_back(self, sends_notification=True):
"""Handle "rejection" of this task.
This will:
* Create a new task with the appropriate type (translate or subtitle).
* Try to reassign it to the previous assignee, leaving it unassigned
if that's not possible.
* Send a notification unless sends_notification is given as False.
NOTE: This function does not modify the *current* task in any way.
"""
        # When sending back, instead of always sending back to the first step
        # (translate/subtitle), go to the step before this one:
        #     Translate/Subtitle -> Review -> Approve
        # Also, only approve and review tasks can be sent back.
if self.type == Task.TYPE_IDS['Approve'] and self.workflow.review_enabled:
type = Task.TYPE_IDS['Review']
else:
is_primary = (self.new_subtitle_version
.subtitle_language
.is_primary_audio_language())
if is_primary:
type = Task.TYPE_IDS['Subtitle']
else:
type = Task.TYPE_IDS['Translate']
        # Guess which assignee to use by finding the last user that did this
        # task type.
previous_task = Task.objects.complete().filter(
team_video=self.team_video, language=self.language, team=self.team, type=type
).order_by('-completed')[:1]
if previous_task:
assignee = previous_task[0].assignee
else:
assignee = None
        # The target assignee may have left the team in the meantime.
        if assignee and not self.team.members.filter(user=assignee).exists():
            assignee = None
task = Task(team=self.team, team_video=self.team_video,
language=self.language, type=type,
assignee=assignee)
task.new_subtitle_version = self.new_subtitle_version
task.set_expiration()
task.save()
if sends_notification:
            # notify the original submitter (the assignee of self)
notifier.reviewed_and_sent_back.delay(self.pk)
return task
def complete_approved(self, user):
"""Mark a review/approve task as Approved and complete it.
        :param user: user who is approving the task
:returns: next task in the workflow.
"""
self.assignee = user
self.approved = Task.APPROVED_IDS['Approved']
return self.complete()
def complete_rejected(self, user):
"""Mark a review/approve task as Rejected and complete it.
        :param user: user who is rejecting the task
:returns: next task in the workflow.
"""
self.assignee = user
self.approved = Task.APPROVED_IDS['Rejected']
return self.complete()
def complete(self):
'''Mark as complete and return the next task in the process if applicable.'''
self.completed = self.now()
self.save()
return { 'Subtitle': self._complete_subtitle,
'Translate': self._complete_translate,
'Review': self._complete_review,
'Approve': self._complete_approve,
}[Task.TYPE_NAMES[self.type]]()
def _can_publish_directly(self, subtitle_version):
from teams.permissions import can_publish_edits_immediately
type = {10: 'Review',
20: 'Review',
30: 'Approve'}.get(self.type)
tasks = (Task.objects._type([type], True, 'Approved')
.filter(language=self.language))
return (can_publish_edits_immediately(self.team_video,
self.assignee,
self.language) and
subtitle_version and
subtitle_version.previous_version() and
subtitle_version.previous_version().is_public() and
subtitle_version.subtitle_language.is_complete_and_synced() and
tasks.exists())
def _find_previous_assignee(self, type):
"""Find the previous assignee for a new review/approve task for this video.
NOTE: This is different than finding out the person to send a task back
to! This is for saying "who reviewed this task last time?".
For now, we'll assign the review/approval task to whomever did it last
time (if it was indeed done), but only if they're still eligible to
perform it now.
"""
from teams.permissions import can_review, can_approve
if type == 'Approve':
            # Check if this is a post-publish edit.
            # According to #1039 we don't want to auto-assign the assignee.
version = self.get_subtitle_version()
if (version and
version.is_public() and
version.subtitle_language.is_complete_and_synced()):
return None
type = Task.TYPE_IDS['Approve']
can_do = can_approve
elif type == 'Review':
type = Task.TYPE_IDS['Review']
can_do = partial(can_review, allow_own=True)
else:
return None
last_task = self.team_video.task_set.complete().filter(
language=self.language, type=type
).order_by('-completed')[:1]
if last_task:
candidate = last_task[0].assignee
if candidate and can_do(self.team_video, candidate, self.language):
return candidate
def _complete_subtitle(self):
"""Handle the messy details of completing a subtitle task."""
sv = self.get_subtitle_version()
        # TL;DR: see issue #1206 for why this is done this way.
if self.workflow.requires_review_or_approval and not self._can_publish_directly(sv):
if self.workflow.review_enabled:
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Review'],
assignee=self._find_previous_assignee('Review'))
task.set_expiration()
task.save()
elif self.workflow.approve_enabled:
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Approve'],
assignee=self._find_previous_assignee('Approve'))
task.set_expiration()
task.save()
else:
# Subtitle task is done, and there is no approval or review
# required, so we mark the version as approved.
sv.publish()
# We need to make sure this is updated correctly here.
from videos import metadata_manager
metadata_manager.update_metadata(self.team_video.video.pk)
if self.workflow.autocreate_translate:
# TODO: Switch to autocreate_task?
_create_translation_tasks(self.team_video, sv)
task = None
return task
def _complete_translate(self):
"""Handle the messy details of completing a translate task."""
sv = self.get_subtitle_version()
        # TL;DR: see issue #1206 for why this is done this way.
if self.workflow.requires_review_or_approval and not self._can_publish_directly(sv):
if self.workflow.review_enabled:
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Review'],
assignee=self._find_previous_assignee('Review'))
task.set_expiration()
task.save()
elif self.workflow.approve_enabled:
# The review step may be disabled. If so, we check the approve step.
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Approve'],
assignee=self._find_previous_assignee('Approve'))
task.set_expiration()
task.save()
else:
sv.publish()
# We need to make sure this is updated correctly here.
from videos import metadata_manager
metadata_manager.update_metadata(self.team_video.video.pk)
task = None
return task
def _complete_review(self):
"""Handle the messy details of completing a review task."""
approval = self.approved == Task.APPROVED_IDS['Approved']
sv = self.get_subtitle_version()
if approval:
self._ensure_language_complete(sv.subtitle_language)
self._add_comment()
task = None
if self.workflow.approve_enabled:
# Approval is enabled, so...
if approval:
# If the reviewer thought these subtitles were good we create
# the next task.
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Approve'],
assignee=self._find_previous_assignee('Approve'))
task.set_expiration()
task.save()
# Notify the appropriate users.
notifier.reviewed_and_pending_approval.delay(self.pk)
else:
# Otherwise we send the subtitles back for improvement.
task = self._send_back()
else:
# Approval isn't enabled, so the ruling of this Review task
# determines whether the subtitles go public.
if approval:
# Make these subtitles public!
self.new_subtitle_version.publish()
# If the subtitles are okay, go ahead and autocreate translation
# tasks if necessary.
if self.workflow.autocreate_translate:
_create_translation_tasks(self.team_video, sv)
# Notify the appropriate users and external services.
notifier.reviewed_and_published.delay(self.pk)
else:
# Send the subtitles back for improvement.
task = self._send_back()
# Before we go, we need to record who reviewed these subtitles, so if
# necessary we can "send back" to them later.
if self.assignee:
sv.set_reviewed_by(self.assignee)
return task
def do_complete_approve(self, lang_ct=None):
return self._complete_approve(lang_ct=lang_ct)
def _complete_approve(self, lang_ct=None):
"""Handle the messy details of completing an approve task."""
approval = self.approved == Task.APPROVED_IDS['Approved']
sv = self.get_subtitle_version()
if approval:
self._ensure_language_complete(sv.subtitle_language)
self._add_comment(lang_ct=lang_ct)
if approval:
# The subtitles are acceptable, so make them public!
self.new_subtitle_version.publish()
# Create translation tasks if necessary.
if self.workflow.autocreate_translate:
_create_translation_tasks(self.team_video, sv)
task = None
# Notify the appropriate users.
notifier.approved_notification.delay(self.pk, approval)
else:
# Send the subtitles back for improvement.
task = self._send_back()
# Before we go, we need to record who approved these subtitles, so if
# necessary we can "send back" to them later.
if self.assignee:
sv.set_approved_by(self.assignee)
if approval:
api_subtitles_approved.send(sv)
else:
api_subtitles_rejected.send(sv)
return task
def _ensure_language_complete(self, subtitle_language):
if not subtitle_language.subtitles_complete:
subtitle_language.subtitles_complete = True
subtitle_language.save()
def get_perform_url(self):
"""Return a URL for whatever dialog is used to perform this task."""
return reverse('teams:perform_task', args=(self.team.slug, self.id))
def tasks_page_perform_link_text(self):
"""Get the link text for perform link on the tasks page."""
if self.assignee:
return _('Resume')
else:
return _('Start now')
def get_widget_url(self):
"""Get the URL to edit the video for this task. """
return reverse("subtitles:subtitle-editor", kwargs={
"video_id": self.team_video.video.video_id,
"language_code": self.language
})
def needs_start_dialog(self):
"""Check if this task needs the start dialog.
The only time we need it is when a user is starting a
transcribe/translate task. We don't need it for review/approval, or
if the task is being resumed.
"""
        # We use the start dialog for selecting two things:
# - primary audio language
# - language of the subtitles
return (self.language == '' or
self.team_video.video.primary_audio_language_code == '')
def get_reviewer(self):
"""For Approve tasks, return the last user to Review these subtitles.
May be None if this task is not an Approve task, or if we can't figure
out the last reviewer for any reason.
"""
if self.get_type_display() == 'Approve':
previous = Task.objects.complete().filter(
team_video=self.team_video,
language=self.language,
team=self.team,
type=Task.TYPE_IDS['Review']).order_by('-completed')[:1]
if previous:
return previous[0].assignee
def set_expiration(self):
"""Set the expiration_date of this task. Does not save().
Requires that self.team and self.assignee be set correctly.
"""
if not self.assignee or not self.team.task_expiration:
self.expiration_date = None
else:
limit = datetime.timedelta(days=self.team.task_expiration)
self.expiration_date = self.now() + limit
def get_subtitle_version(self):
""" Gets the subtitle version related to this task.
If the task has a subtitle_version attached, return it and
if not, try to find it throught the subtitle language of the video.
Note: we need this since we don't attach incomplete subtitle_version
to the task (and if we do we need to set the status to unmoderated and
that causes the version to get published).
"""
# autocreate sets the subtitle_version to another
# language's subtitle_version and that was breaking
# not only the interface but the new upload method.
if (self.new_subtitle_version and
self.new_subtitle_version.language_code == self.language):
return self.new_subtitle_version
if not hasattr(self, "_subtitle_version"):
language = self.team_video.video.subtitle_language(self.language)
self._subtitle_version = (language.get_tip(public=False)
if language else None)
return self._subtitle_version
def is_blocked(self):
"""Return whether this task is "blocked".
"Blocked" means that it's a translation task but the source language
isn't ready to be translated yet.
"""
subtitle_version = self.get_subtitle_version()
if not subtitle_version:
return False
source_language = subtitle_version.subtitle_language.get_translation_source_language()
if not source_language:
return False
can_perform = (source_language and
source_language.is_complete_and_synced())
if self.get_type_display() != 'Translate':
if self.get_type_display() in ('Review', 'Approve'):
                # review and approve tasks will be blocked if they're a
                # translation with a draft whose source language no longer
                # has a published version
if not can_perform or source_language.language_code == self.language:
return True
return not can_perform
    def save(self, update_team_video_index=True, *args, **kwargs):
        if self.language:
            if self.language not in translation.ALL_LANGUAGE_CODES:
                raise ValidationError(
                    "Subtitle Language should be a valid code.")
result = super(Task, self).save(*args, **kwargs)
if update_team_video_index:
tasks.update_one_team_video.delay(self.team_video.pk)
Video.cache.invalidate_by_pk(self.team_video.video_id)
return result
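# Illustrative sketch (not part of the original module): the task-completion
# flow described above. `task` is assumed to be an incomplete Review task and
# `reviewer` a team member who is allowed to review it.
def _sketch_complete_review(task, reviewer):
    next_task = task.complete_approved(reviewer)
    # If approval is enabled, next_task is the newly created Approve task;
    # otherwise the subtitles were published and next_task is None.
    return next_task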
# Settings
class SettingManager(models.Manager):
use_for_related_fields = True
def guidelines(self):
"""Return a QS of settings related to team guidelines."""
keys = [key for key, name in Setting.KEY_CHOICES
if name.startswith('guidelines_')]
return self.get_query_set().filter(key__in=keys)
def messages(self):
"""Return a QS of settings related to team messages."""
keys = [key for key, name in Setting.KEY_CHOICES
if name.startswith('messages_')]
return self.get_query_set().filter(key__in=keys)
def messages_guidelines(self):
"""Return a QS of settings related to team messages or guidelines."""
return self.get_query_set().filter(key__in=Setting.MESSAGE_KEYS)
def with_names(self, names):
return self.filter(key__in=[Setting.KEY_IDS[name] for name in names])
def all_messages(self):
messages = {}
for key in Setting.MESSAGE_KEYS:
name = Setting.KEY_NAMES[key]
messages[name] = self.instance.get_default_message(name)
messages.update({
s.key_name: s.data
for s in self.messages_guidelines()
if s.data
})
return messages
class Setting(models.Model):
KEY_CHOICES = (
(100, 'messages_invite'),
(101, 'messages_manager'),
(102, 'messages_admin'),
(103, 'messages_application'),
(104, 'messages_joins'),
(200, 'guidelines_subtitle'),
(201, 'guidelines_translate'),
(202, 'guidelines_review'),
        # the 300s indicate whether this team blocks those notifications
(300, 'block_invitation_sent_message'),
(301, 'block_application_sent_message'),
(302, 'block_application_denided_message'),
(303, 'block_team_member_new_message'),
(304, 'block_team_member_leave_message'),
(305, 'block_task_assigned_message'),
(306, 'block_reviewed_and_published_message'),
(307, 'block_reviewed_and_pending_approval_message'),
(308, 'block_reviewed_and_sent_back_message'),
(309, 'block_approved_message'),
(310, 'block_new_video_message'),
# 400 is for text displayed on web pages
(401, 'pagetext_welcome_heading'),
)
KEY_NAMES = dict(KEY_CHOICES)
KEY_IDS = dict([choice[::-1] for choice in KEY_CHOICES])
MESSAGE_KEYS = [
key for key, name in KEY_CHOICES
if name.startswith('messages_') or name.startswith('guidelines_')
or name.startswith('pagetext_')
]
MESSAGE_DEFAULTS = {
'pagetext_welcome_heading': _("Help %(team)s reach a world audience"),
}
key = models.PositiveIntegerField(choices=KEY_CHOICES)
data = models.TextField(blank=True)
team = models.ForeignKey(Team, related_name='settings')
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
objects = SettingManager()
class Meta:
unique_together = (('key', 'team'),)
def __unicode__(self):
return u'%s - %s' % (self.team, self.key_name)
@property
def key_name(self):
"""Return the key name for this setting.
TODO: Remove this and replace with get_key_display()?
"""
return Setting.KEY_NAMES[self.key]
# TeamLanguagePreferences
class TeamLanguagePreferenceManager(models.Manager):
def _generate_writable(self, team):
"""Return the set of language codes that are writeable for this team."""
unwritable = self.for_team(team).filter(allow_writes=False, preferred=False).values("language_code")
unwritable = set([x['language_code'] for x in unwritable])
return translation.ALL_LANGUAGE_CODES - unwritable
def _generate_readable(self, team):
"""Return the set of language codes that are readable for this team."""
unreadable = self.for_team(team).filter(allow_reads=False, preferred=False).values("language_code")
unreadable = set([x['language_code'] for x in unreadable])
return translation.ALL_LANGUAGE_CODES - unreadable
def _generate_preferred(self, team):
"""Return the set of language codes that are preferred for this team."""
preferred = self.for_team(team).filter(preferred=True).values("language_code")
return set([x['language_code'] for x in preferred])
def for_team(self, team):
"""Return a QS of all language preferences for the given team."""
return self.get_query_set().filter(team=team)
    def on_changed(self, sender, instance, *args, **kwargs):
"""Perform any necessary actions when a language preference changes.
TODO: Refactor this out of the manager...
"""
from teams.cache import invalidate_lang_preferences
invalidate_lang_preferences(instance.team)
def get_readable(self, team):
"""Return the set of language codes that are readable for this team.
This value may come from memcache if possible.
"""
from teams.cache import get_readable_langs
return get_readable_langs(team)
def get_writable(self, team):
"""Return the set of language codes that are writeable for this team.
This value may come from memcache if possible.
"""
from teams.cache import get_writable_langs
return get_writable_langs(team)
def get_preferred(self, team):
"""Return the set of language codes that are preferred for this team.
This value may come from memcache if possible.
"""
from teams.cache import get_preferred_langs
return get_preferred_langs(team)
class TeamLanguagePreference(models.Model):
"""Represent language preferences for a given team.
First, TLPs may mark a language as "preferred". If that's the case then the
other attributes of this model are irrelevant and can be ignored.
"Preferred" languages will have translation tasks automatically created for
them when subtitles are added.
If preferred is False, the TLP describes a *restriction* on the language
instead. Writing in that language may be prevented, or both reading and
writing may be prevented.
(Note: "writing" means not only writing new subtitles but also creating
tasks, etc)
This is how the restriction settings should interact. TLP means that we
have created a TeamLanguagePreference for that team and language.
| Action | NO | allow_read=True, | allow_read=False, |
| | TLP | allow_write=False | allow_write=False |
========================================================================================
| assignable as tasks | X | | |
| assignable as narrowing | X | | |
| listed on the widget for viewing | X | X | |
| listed on the widget for improving | X | | |
| returned from the api read operations | X | X | |
| upload / write operations from the api | X | | |
| show up on the start dialog | X | | |
+----------------------------------------+-----+-------------------+-------------------+
Remember, this table only applies if preferred=False. If the language is
preferred the "restriction" attributes are effectively garbage. Maybe we
should make the column nullable to make this more clear?
    allow_read=True, allow_write=True, preferred=False is invalid. Just remove
    the row altogether.
"""
team = models.ForeignKey(Team, related_name="lang_preferences")
language_code = models.CharField(max_length=16)
allow_reads = models.BooleanField(default=False)
allow_writes = models.BooleanField(default=False)
preferred = models.BooleanField(default=False)
objects = TeamLanguagePreferenceManager()
class Meta:
unique_together = ('team', 'language_code')
def clean(self, *args, **kwargs):
if self.allow_reads and self.allow_writes:
raise ValidationError("No sense in having all allowed, just remove the preference for this language.")
if self.preferred and (self.allow_reads or self.allow_writes):
raise ValidationError("Cannot restrict a preferred language.")
super(TeamLanguagePreference, self).clean(*args, **kwargs)
def __unicode__(self):
return u"%s preference for team %s" % (self.language_code, self.team)
post_save.connect(TeamLanguagePreference.objects.on_changed, TeamLanguagePreference)
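# Illustrative sketch (not part of the original module), following the table
# in the TeamLanguagePreference docstring. The `team` argument is assumed to
# be an existing Team.
def _sketch_language_preferences(team):
    # Preferred: translation tasks are auto-created for this language.
    TeamLanguagePreference.objects.create(
        team=team, language_code='fr', preferred=True)
    # Fully restricted: the language is neither readable nor writable.
    TeamLanguagePreference.objects.create(
        team=team, language_code='de',
        allow_reads=False, allow_writes=False)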
# TeamNotificationSettings
class TeamNotificationSettingManager(models.Manager):
def notify_team(self, team_pk, event_name, **kwargs):
"""Notify the given team of a given event.
Finds the matching notification settings for this team, instantiates
the notifier class, and sends the appropriate notification.
If the notification settings has an email target, sends an email.
If the http settings are filled, then sends the request.
        This can be run as a Celery task, as it requires no objects to be passed.
"""
try:
team = Team.objects.get(pk=team_pk)
except Team.DoesNotExist:
logger.error("A pk for a non-existent team was passed in.",
extra={"team_pk": team_pk, "event_name": event_name})
return
try:
if team.partner:
notification_settings = self.get(partner=team.partner)
else:
notification_settings = self.get(team=team)
except TeamNotificationSetting.DoesNotExist:
return
notification_settings.notify(event_name, **kwargs)
class TeamNotificationSetting(models.Model):
"""Info on how a team should be notified of changes to its videos.
    For now, a team can be notified by having an HTTP request sent with the
payload as the notification information. This cannot be hardcoded since
teams might have different urls for each environment.
Some teams have strict requirements on mapping video ids to their internal
values, and also their own language codes. Therefore we need to configure
a class that can do the correct mapping.
TODO: allow email notifications
"""
EVENT_VIDEO_NEW = "video-new"
EVENT_VIDEO_EDITED = "video-edited"
EVENT_LANGUAGE_NEW = "language-new"
EVENT_LANGUAGE_EDITED = "language-edit"
EVENT_LANGUAGE_DELETED = "language-deleted"
EVENT_SUBTITLE_NEW = "subs-new"
EVENT_SUBTITLE_APPROVED = "subs-approved"
EVENT_SUBTITLE_REJECTED = "subs-rejected"
EVENT_APPLICATION_NEW = 'application-new'
team = models.OneToOneField(Team, related_name="notification_settings",
null=True, blank=True)
partner = models.OneToOneField('Partner',
related_name="notification_settings", null=True, blank=True)
    # the url to post the callback notifying partners of new video activity
request_url = models.URLField(blank=True, null=True)
basic_auth_username = models.CharField(max_length=255, blank=True, null=True)
basic_auth_password = models.CharField(max_length=255, blank=True, null=True)
# not being used, here to avoid extra migrations in the future
email = models.EmailField(blank=True, null=True)
# integers mapping to classes, see unisubs-integration/notificationsclasses.py
notification_class = models.IntegerField(default=1,)
objects = TeamNotificationSettingManager()
def get_notification_class(self):
try:
from ted.notificationclasses import NOTIFICATION_CLASS_MAP
return NOTIFICATION_CLASS_MAP[self.notification_class]
except ImportError:
logger.exception("Apparently unisubs-integration is not installed")
def notify(self, event_name, **kwargs):
"""Resolve the notification class for this setting and fires notfications."""
notification_class = self.get_notification_class()
if not notification_class:
logger.error("Could not find notification class %s" % self.notification_class)
return
notification = notification_class(self.team, self.partner,
event_name, **kwargs)
if self.request_url:
success, content = notification.send_http_request(
self.request_url,
self.basic_auth_username,
self.basic_auth_password
)
return success, content
# FIXME: spec and test this, for now just return
return
def __unicode__(self):
if self.partner:
return u'NotificationSettings for partner %s' % self.partner
return u'NotificationSettings for team %s' % self.team
class BillingReport(models.Model):
# use BillingRecords to signify completed work
TYPE_BILLING_RECORD = 2
# use approval tasks to signify completed work
TYPE_APPROVAL = 3
# Like TYPE_APPROVAL, but centered on the users who subtitle/review the
# work
TYPE_APPROVAL_FOR_USERS = 4
TYPE_CHOICES = (
(TYPE_BILLING_RECORD, 'Crowd sourced'),
(TYPE_APPROVAL, 'Professional services'),
(TYPE_APPROVAL_FOR_USERS, 'On-demand translators'),
)
teams = models.ManyToManyField(Team, related_name='billing_reports')
start_date = models.DateField()
end_date = models.DateField()
csv_file = S3EnabledFileField(blank=True, null=True,
upload_to='teams/billing/')
processed = models.DateTimeField(blank=True, null=True)
type = models.IntegerField(choices=TYPE_CHOICES,
default=TYPE_BILLING_RECORD)
def __unicode__(self):
if hasattr(self, 'id') and self.id is not None:
team_count = self.teams.all().count()
else:
team_count = 0
return "%s teams (%s - %s)" % (team_count,
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
def _get_approved_tasks(self):
return Task.objects.complete_approve().filter(
approved=Task.APPROVED_IDS['Approved'],
team__in=self.teams.all(),
completed__range=(self.start_date, self.end_date))
    def _report_date(self, dt):
        return dt.strftime('%Y-%m-%d %H:%M:%S')
def generate_rows_type_approval(self):
header = (
'Team',
'Video Title',
'Video ID',
'Project',
'Language',
'Minutes',
'Original',
'Translation?',
'Approver',
'Date',
)
rows = [header]
for approve_task in self._get_approved_tasks():
video = approve_task.team_video.video
project = approve_task.team_video.project.name if approve_task.team_video.project else 'none'
version = approve_task.new_subtitle_version
language = version.subtitle_language
subtitle_task = (Task.objects.complete_subtitle_or_translate()
.filter(team_video=approve_task.team_video,
language=approve_task.language)
.order_by('-completed'))[0]
rows.append((
approve_task.team.name,
video.title_display(),
video.video_id,
project,
approve_task.language,
get_minutes_for_version(version, False),
language.is_primary_audio_language(),
subtitle_task.type==Task.TYPE_IDS['Translate'],
unicode(approve_task.assignee),
self._report_date(approve_task.completed),
))
return rows
def generate_rows_type_approval_for_users(self):
header = (
'User',
'Task Type',
'Team',
'Video Title',
'Video ID',
'Project',
'Language',
'Minutes',
'Original',
'Approver',
'Note',
'Date',
'Pay Rate',
)
data_rows = []
for approve_task in self._get_approved_tasks():
video = approve_task.team_video.video
project = approve_task.team_video.project.name if approve_task.team_video.project else 'none'
version = approve_task.get_subtitle_version()
language = version.subtitle_language
all_tasks = [approve_task]
try:
all_tasks.append((Task.objects.complete_subtitle_or_translate()
.filter(team_video=approve_task.team_video,
language=approve_task.language)
.order_by('-completed'))[0])
except IndexError:
# no subtitling task, probably the review task was manually
# created.
pass
try:
all_tasks.append((Task.objects.complete_review()
.filter(team_video=approve_task.team_video,
language=approve_task.language)
.order_by('-completed'))[0])
except IndexError:
# review not enabled
pass
for task in all_tasks:
data_rows.append((
unicode(task.assignee),
task.get_type_display(),
approve_task.team.name,
video.title_display(),
video.video_id,
project,
language.language_code,
get_minutes_for_version(version, False),
language.is_primary_audio_language(),
unicode(approve_task.assignee),
unicode(task.body),
self._report_date(task.completed),
task.assignee.pay_rate_code,
))
data_rows.sort(key=lambda row: row[0])
return [header] + data_rows
def generate_rows_type_billing_record(self):
rows = []
for i,team in enumerate(self.teams.all()):
rows = rows + BillingRecord.objects.csv_report_for_team(team,
self.start_date, self.end_date, add_header=i == 0)
return rows
def generate_rows(self):
if self.type == BillingReport.TYPE_BILLING_RECORD:
rows = self.generate_rows_type_billing_record()
elif self.type == BillingReport.TYPE_APPROVAL:
rows = self.generate_rows_type_approval()
elif self.type == BillingReport.TYPE_APPROVAL_FOR_USERS:
rows = self.generate_rows_type_approval_for_users()
else:
raise ValueError("Unknown type: %s" % self.type)
return rows
def convert_unicode_to_utf8(self, rows):
def _convert(value):
if isinstance(value, unicode):
return value.encode("utf-8")
else:
return value
return [tuple(_convert(v) for v in row) for row in rows]
def process(self):
"""
        Generate the correct rows (including headers), save them to a temp
        file, then set that file as the csv_file property which, when using
        the S3 storage backend, will take care of exporting it to S3.
"""
try:
rows = self.generate_rows()
except StandardError:
logger.error("Error generating billing report: (id: %s)", self.id)
self.csv_file = None
else:
self.csv_file = self.make_csv_file(rows)
self.processed = datetime.datetime.utcnow()
self.save()
def make_csv_file(self, rows):
rows = self.convert_unicode_to_utf8(rows)
fn = '/tmp/bill-%s-teams-%s-%s-%s-%s.csv' % (
self.teams.all().count(),
self.start_str, self.end_str,
self.get_type_display(), self.pk)
with open(fn, 'w') as f:
writer = csv.writer(f)
writer.writerows(rows)
return File(open(fn, 'r'))
@property
def start_str(self):
return self.start_date.strftime("%Y%m%d")
@property
def end_str(self):
return self.end_date.strftime("%Y%m%d")
class BillingReportGenerator(object):
def __init__(self, all_records, add_header=True):
if add_header:
self.rows = [self.header()]
else:
self.rows = []
all_records = list(all_records)
self.make_language_number_map(all_records)
self.make_languages_without_records(all_records)
for video, records in groupby(all_records, lambda r: r.video):
records = list(records)
if video:
for lang in self.languages_without_records.get(video.id, []):
self.rows.append(
self.make_row_for_lang_without_record(video, lang))
for r in records:
self.rows.append(self.make_row(video, r))
def header(self):
return [
'Video Title',
'Video ID',
'Project',
'Language',
'Minutes',
'Original',
'Language number',
'Team',
'Created',
'Source',
'User',
]
def make_row(self, video, record):
return [
(video and video.title_display()) or "----",
(video and video.video_id) or "deleted",
(record.project.name if record.project else 'none'),
(record.new_subtitle_language and record.new_subtitle_language.language_code) or "----",
record.minutes,
record.is_original,
            self.language_number_map.get(record.id, "----"),
record.team.slug,
record.created.strftime('%Y-%m-%d %H:%M:%S'),
record.source,
record.user.username,
]
def make_language_number_map(self, records):
self.language_number_map = {}
videos = set(r.video for r in records)
video_counts = dict((v and v.id, 0) for v in videos)
qs = (BillingRecord.objects
.filter(video__in=videos)
.order_by('created'))
for record in qs:
vid = record.video and record.video.id
video_counts[vid] += 1
self.language_number_map[record.id] = video_counts[vid]
def make_languages_without_records(self, records):
self.languages_without_records = {}
videos = [r.video for r in records]
language_ids = [r.new_subtitle_language_id for r in records]
no_billing_record_where = """\
NOT EXISTS (
SELECT 1
FROM teams_billingrecord br
WHERE br.new_subtitle_language_id = subtitles_subtitlelanguage.id
)"""
qs = (NewSubtitleLanguage.objects
.filter(video__in=videos, subtitles_complete=True)
              .exclude(id__in=language_ids)
              .extra(where=[no_billing_record_where]))
for lang in qs:
vid = lang.video_id
if vid not in self.languages_without_records:
self.languages_without_records[vid] = [lang]
else:
self.languages_without_records[vid].append(lang)
def make_row_for_lang_without_record(self, video, language):
return [
video.title_display(),
video.video_id,
'none',
language.language_code,
0,
language.is_primary_audio_language(),
0,
'unknown',
language.created.strftime('%Y-%m-%d %H:%M:%S'),
'unknown',
'unknown',
]
class BillingRecordManager(models.Manager):
def data_for_team(self, team, start, end):
return self.filter(team=team, created__gte=start, created__lte=end)
def csv_report_for_team(self, team, start, end, add_header=True):
all_records = self.data_for_team(team, start, end)
generator = BillingReportGenerator(all_records, add_header)
return generator.rows
def insert_records_for_translations(self, billing_record):
"""
        If you've translated from an incomplete language, and later on that
        language is completed, we must check whether any translations are now
        complete and therefore should have billing records created for them.
"""
translations = billing_record.new_subtitle_language.get_dependent_subtitle_languages()
inserted = []
for translation in translations:
version = translation.get_tip(public=False)
if version:
inserted.append(self.insert_record(version))
return filter(bool, inserted)
def insert_record(self, version):
"""
        Figure out if this version qualifies for a billing record and,
        if so, create one. This should be self-contained, i.e. safe to call
        for any version. No records should be created if not needed, and it
        won't create multiples.
If this language has translations it will check if any of those are now
eligible for BillingRecords and create one accordingly.
"""
from teams.models import BillingRecord
celery_logger.debug('insert billing record')
language = version.subtitle_language
video = language.video
tv = video.get_team_video()
if not tv:
celery_logger.debug('not a team video')
return
if not language.is_complete_and_synced(public=False):
celery_logger.debug('language not complete')
return
try:
# we already have a record
previous_record = BillingRecord.objects.get(video=video,
new_subtitle_language=language)
# make sure we update it
celery_logger.debug('a billing record for this language exists')
previous_record.is_original = \
video.primary_audio_language_code == language.language_code
previous_record.save()
return
except BillingRecord.DoesNotExist:
pass
if NewSubtitleVersion.objects.filter(
subtitle_language=language,
created__lt=BILLING_CUTOFF).exclude(
pk=version.pk).exists():
celery_logger.debug('an older version exists')
return
is_original = language.is_primary_audio_language()
source = version.origin
team = tv.team
project = tv.project
new_record = BillingRecord.objects.create(
video=video,
            project=project,
new_subtitle_version=version,
new_subtitle_language=language,
is_original=is_original, team=team,
created=version.created,
source=source,
user=version.author)
from_translations = self.insert_records_for_translations(new_record)
return new_record, from_translations
def get_minutes_for_version(version, round_up_to_integer):
"""
    Return the length in minutes of the subtitles in the given version
"""
subs = version.get_subtitles()
if len(subs) == 0:
return 0
for sub in subs:
if sub.start_time is not None:
start_time = sub.start_time
break
# we shouldn't have an end time set without a start time, but handle
# it just in case
if sub.end_time is not None:
start_time = sub.end_time
break
else:
return 0
for sub in reversed(subs):
if sub.end_time is not None:
end_time = sub.end_time
break
# we shouldn't have an end time not set, but check for that just in
# case
if sub.start_time is not None:
end_time = sub.start_time
break
else:
return 0
duration_seconds = (end_time - start_time) / 1000.0
minutes = duration_seconds/60.0
if round_up_to_integer:
minutes = int(ceil(minutes))
return minutes
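def _sketch_minutes_example():
    # Illustrative sketch (not part of the original module): the arithmetic
    # performed by get_minutes_for_version(). With a first start_time of
    # 5000ms and a last end_time of 125000ms:
    duration_seconds = (125000 - 5000) / 1000.0  # 120.0 seconds
    minutes = duration_seconds / 60.0            # 2.0 minutes
    return int(ceil(minutes))                    # 2 once rounded up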
class BillingRecord(models.Model):
# The billing record should still exist if the video is deleted
video = models.ForeignKey(Video, blank=True, null=True, on_delete=models.SET_NULL)
project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.SET_NULL)
subtitle_version = models.ForeignKey(SubtitleVersion, null=True,
blank=True, on_delete=models.SET_NULL)
new_subtitle_version = models.ForeignKey(NewSubtitleVersion, null=True,
blank=True, on_delete=models.SET_NULL)
subtitle_language = models.ForeignKey(SubtitleLanguage, null=True,
blank=True, on_delete=models.SET_NULL)
new_subtitle_language = models.ForeignKey(NewSubtitleLanguage, null=True,
blank=True, on_delete=models.SET_NULL)
minutes = models.FloatField(blank=True, null=True)
is_original = models.BooleanField(default=False)
team = models.ForeignKey(Team)
created = models.DateTimeField()
source = models.CharField(max_length=255)
user = models.ForeignKey(User)
objects = BillingRecordManager()
class Meta:
unique_together = ('video', 'new_subtitle_language')
def __unicode__(self):
return "%s - %s" % (self.video and self.video.video_id,
self.new_subtitle_language and self.new_subtitle_language.language_code)
def save(self, *args, **kwargs):
if not self.minutes and self.minutes != 0.0:
self.minutes = self.get_minutes()
assert self.minutes is not None
return super(BillingRecord, self).save(*args, **kwargs)
def get_minutes(self):
return get_minutes_for_version(self.new_subtitle_version, True)
class Partner(models.Model):
name = models.CharField(_(u'name'), max_length=250, unique=True)
slug = models.SlugField(_(u'slug'), unique=True)
can_request_paid_captions = models.BooleanField(default=False)
# The `admins` field specifies users who can do just about anything within
# the partner realm.
admins = models.ManyToManyField('auth.CustomUser',
related_name='managed_partners', blank=True, null=True)
def __unicode__(self):
return self.name
def is_admin(self, user):
return user in self.admins.all()
| agpl-3.0 | -5,704,561,719,830,586,000 | 36.958653 | 232 | 0.608513 | false |
codedsk/hubcheck | hubcheck/pageobjects/po_time_overview_page.py | 1 | 1349 | from hubcheck.pageobjects.po_time_base_page import TimeBasePage
from hubcheck.pageobjects.basepageelement import Link
class TimeOverviewPage(TimeBasePage):
"""time overview page"""
def __init__(self,browser,catalog,groupid=None):
super(TimeOverviewPage,self).__init__(browser,catalog)
self.path = "/time/overview"
# load hub's classes
TimeOverviewPage_Locators = self.load_class('TimeOverviewPage_Locators')
TimeOverview = self.load_class('TimeOverview')
# update this object's locator
self.locators.update(TimeOverviewPage_Locators.locators)
# setup page object's components
self.overview = TimeOverview(self,{'base':'overview'})
def get_active_hubs_count(self):
return self.overview.get_active_hubs_count()
def get_active_tasks_count(self):
return self.overview.get_active_tasks_count()
def get_total_hours_count(self):
return self.overview.get_total_hours_count()
def goto_hubs(self):
self.overview.goto_hubs()
def goto_tasks(self):
self.overview.goto_tasks()
def goto_records(self):
self.overview.goto_records()
class TimeOverviewPage_Locators_Base(object):
"""locators for TimeOverviewPage object"""
locators = {
'overview' : "css=#plg_time_overview",
}
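# Illustrative sketch (not part of the original module): how this page object
# is typically driven from a hubcheck test. `browser` and `catalog` are
# assumed to be provided by the test harness.
def _sketch_time_overview_usage(browser, catalog):
    po = TimeOverviewPage(browser, catalog)
    hubs = po.get_active_hubs_count()
    tasks = po.get_active_tasks_count()
    po.goto_records()
    return hubs, tasks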
| mit | 2,818,290,263,119,417,000 | 28.977778 | 80 | 0.673091 | false |
madeso/prettygood | dotnet/Tagger/TagValidator.py | 1 | 2567 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MusicBrainz;
using PrettyGood.Util;
namespace Tagger
{
class TagValidator
{
public bool validate(IdTag tag)
{
Artist artist = null;
if (string.IsNullOrEmpty(tag.Artist) == false) artist = getArtist(tag.Artist);
if (artist == null) return false;
Release album = null;
if (string.IsNullOrEmpty(tag.Album) == false) album = getRelease(artist, tag.Album);
Track track = null;
            if (album != null && string.IsNullOrEmpty(tag.Title))
            {
                // No title to search by: fall back to the tag's track number.
                // Guard against a null album here; note the assumption that
                // GetTracks() is 0-indexed while track numbers are 1-based.
                int num = int.Parse(tag.TrackNumber.RemoveLeadingZeros());
                num %= 100;
                track = album.GetTracks()[num - 1];
}
else
{
foreach (var t in Track.Query(tag.Title, artist.GetName()))
{
track = t;
break;
}
}
if (track == null) return false;
if (album == null)
{
foreach (var r in track.GetReleases())
{
album = r;
break;
}
}
tag.Artist = artist.GetName();
tag.Album = album.GetTitle();
tag.TrackNumber = track.GetTrackNumber(album).ToString();
tag.TotalTracks = album.GetTracks().Count.ToString();
//tag.Year = album.GetReleaseRelations()[0].BeginDate;
return true;
}
private Release getRelease(Artist artist, string a)
{
string album = a.ToLower();
foreach (Release r in artist.GetReleases())
{
if (album == r.GetTitle().ToLower()) return r;
}
return null;
}
private Artist getArtist(string art)
{
string artist = art.ToLower();
if (artists.ContainsKey(artist)) return artists[artist];
Artist info = null;
System.Threading.Thread.Sleep(500);
foreach (Artist a in Artist.Query(artist))
{
string name = a.GetName();
if (artist.Contains(name.ToLower()))
{
info = a;
break;
}
}
artists.Add(artist, info);
return info;
}
Dictionary<string, Artist> artists = new Dictionary<string, Artist>();
}
}
| mit | -570,241,561,569,758,850 | 27.186813 | 96 | 0.475244 | false |
clasnake/recommender | similarity.py | 1 | 6369 | from __future__ import division
from math import sqrt
def sim_distance(prefs, item1, item2):
    # get the list of shared items
    si = {}
    for item in prefs[item1]:
        if item in prefs[item2]:
            si[item] = 1
    # if they have no shared items, return 0
    if len(si) == 0: return 0
    # add the squares of all the differences
    sum_of_squares = sum(
        [pow(prefs[item1][item] - prefs[item2][item], 2) for item in prefs[item1] if item in prefs[item2]])
    return 1 / (1 + sqrt(sum_of_squares))
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs, p1, p2):
# Get the list of mutually rated items
si = {}
for item in prefs[p1]:
if item in prefs[p2]: si[item] = 1
# if they are no ratings in common, return 0
if len(si) == 0: return 0
# Sum calculations
n = len(si)
# Sums of all the preferences
sum1 = float(sum([prefs[p1][it] for it in si]))
sum2 = float(sum([prefs[p2][it] for it in si]))
# Sums of the squares
sum1Sq = float(sum([pow(prefs[p1][it], 2) for it in si]))
sum2Sq = float(sum([pow(prefs[p2][it], 2) for it in si]))
# Sum of the products
pSum = float(sum([prefs[p1][it] * prefs[p2][it] for it in si]))
# Calculate r (Pearson score)
num = float(pSum - (sum1 * sum2 / n))
den = float(sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n)))
if den == 0: return 0
r = float(num / den)
return round(r, 7)
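def _demo_sim_pearson():
    # Illustrative sketch (not part of the original file): a tiny preference
    # matrix for sanity-checking sim_pearson. Perfectly correlated raters
    # score 1.0 and perfectly anti-correlated raters score -1.0.
    prefs = {
        'alice': {'a': 1.0, 'b': 2.0, 'c': 3.0},
        'bob': {'a': 2.0, 'b': 4.0, 'c': 6.0},
        'carol': {'a': 3.0, 'b': 2.0, 'c': 1.0},
    }
    assert sim_pearson(prefs, 'alice', 'bob') == 1.0
    assert sim_pearson(prefs, 'alice', 'carol') == -1.0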
def sim_pearson1(prefs, person1, person2):
#get the list of shared items
si = {}
for item in prefs[person1]:
if item in prefs[person2]:
si[item] = 1
#if they have no shared items, return 0
if len(si) == 0: return 0
#find the number of elements
n = len(si)
#add up all the prefs
sum1 = sum([prefs[person1][item] for item in si])
sum2 = sum([prefs[person2][item] for item in si])
#calculate the mean of the critics of p1 and p2
    mean1 = sum1 / n
    mean2 = sum2 / n
#calculate the covariance
covariance = sum([(prefs[person1][item] - mean1) * (prefs[person2][item] - mean2) for item in si]) / n
#calculate the standard_deviation
sd1 = sqrt(sum([pow(prefs[person1][item] - mean1, 2) for item in si]) / n)
sd2 = sqrt(sum([pow(prefs[person2][item] - mean2, 2) for item in si]) / n)
if sd1 * sd2 == 0: return 0
#calculate the pearson correlation improved
pearson = (covariance / (sd1 * sd2))
return pearson
def sim_pearson_improved(prefs, person1, person2):
#get the list of shared items
si = {}
for item in prefs[person1]:
if item in prefs[person2]:
si[item] = 1
#if they have no shared items, return 0
if len(si) == 0: return 0
#find the number of elements
n = len(si)
#get the count of items rated by person
count1 = 0
count2 = 0
for person in prefs[person1]:
count1 += 1
for item in prefs[person2]:
count2 += 1
totalCount = count1 + count2 - n
#add up all the prefs
sum1 = sum([prefs[person1][item] for item in si])
sum2 = sum([prefs[person2][item] for item in si])
#calculate the mean of the critics of p1 and p2
    mean1 = sum1 / n
    mean2 = sum2 / n
#calculate the covariance
covariance = sum([(prefs[person1][item] - mean1) * (prefs[person2][item] - mean2) for item in si]) / n
#calculate the standard_deviation
sd1 = sqrt(sum([pow(prefs[person1][item] - mean1, 2) for item in si]) / n)
sd2 = sqrt(sum([pow(prefs[person2][item] - mean2, 2) for item in si]) / n)
if sd1 * sd2 == 0: return 0
#calculate the pearson correlation improved
pearson = (covariance / (sd1 * sd2)) * (float(n) / float(totalCount))
#print n,count,float(n)/float(count),pearson
return pearson
def sim_cosine(prefs, item1, item2):
si = {}
for i in prefs[item1]:
if i in prefs[item2]:
si[i] = 1
#print si
if len(si) == 0: return 0
x = sqrt(sum([prefs[item1][it] ** 2 for it in si]))
y = sqrt(sum([prefs[item2][it] ** 2 for it in si]))
xy = sum([prefs[item1][it] * prefs[item2][it] for it in si])
cos = xy / (x * y)
return cos
def sim_cosine_improved(prefs, item1, item2):
si = {}
for i in prefs[item1]:
if i in prefs[item2]:
si[i] = 1
#print si
n = len(si)
if n == 0: return 0
count1 = 0
count2 = 0
for item in prefs[item1]:
count1 += 1
for item in prefs[item2]:
count2 += 1
totalCount = count1 + count2 - n
x = sqrt(sum([prefs[item1][it] ** 2 for it in si]))
y = sqrt(sum([prefs[item2][it] ** 2 for it in si]))
xy = sum([prefs[item1][it] * prefs[item2][it] for it in si])
cos = xy / (x * y)
return cos * (float(n) / float(totalCount))
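def _demo_sim_cosine_improved():
    # Illustrative sketch (not part of the original file): the "improved"
    # variant scales plain cosine by n / totalCount, shrinking the score for
    # items that share only a small fraction of their raters.
    prefs = {
        'x': {'u1': 1.0, 'u2': 1.0, 'u3': 1.0},
        'y': {'u1': 1.0, 'u4': 1.0},
    }
    # One shared rater out of four distinct raters: 1.0 * (1 / 4)
    assert sim_cosine(prefs, 'x', 'y') == 1.0
    assert sim_cosine_improved(prefs, 'x', 'y') == 0.25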
def sim_Jaccard(s1, s2, length):
count = 0
for i in range(0, length):
if s1[i] == '1' and s2[i] == '1':
count += 1
if s1[i] == '1\n' and s2[i] == '1\n':
count += 1
return count / (length - count)
def sim_itemType(s1, s2, length):
count = 0
for i in range(0, length):
if s1[i] == '1' and s2[i] == '1':
count += 1
if s1[i] == '1\n' and s2[i] == '1\n':
count += 1
return count / 5
def sim_cosine_improved_tag(prefs, item1, item2, movieTags):
common = 0
for i in movieTags[item1]:
if i in movieTags[item2]:
common += 1
if common >= 5:
return 0.8
else:
si = {}
for i in prefs[item1]:
if i in prefs[item2]:
si[i] = 1
#print si
n = len(si)
if n == 0: return 0
count1 = 0
count2 = 0
for item in prefs[item1]:
count1 += 1
for item in prefs[item2]:
count2 += 1
totalCount = count1 + count2 - n
x = sqrt(sum([prefs[item1][it] ** 2 for it in si]))
y = sqrt(sum([prefs[item2][it] ** 2 for it in si]))
xy = sum([prefs[item1][it] * prefs[item2][it] for it in si])
cos = xy / (x * y)
return cos * (float(n) / float(totalCount))
#def sim_pearson_improved_typeAdded(prefs,item1,item2):
# pearson_improved=sim_pearson_improved(prefs,item1,item2)
# item_type=itemSimSet[item1][item2]
# return 0.9*(pearson_improved+1)/2.0+0.1*item_type
| mit | 2,447,981,508,787,601,000 | 27.560538 | 107 | 0.566808 | false |
mercycorps/tola-activity | htdocs/indicators/test.py | 1 | 2312 | from django.test import TestCase
from django.test import RequestFactory
from django.test import Client
from indicators.models import Indicator, IndicatorType, Objective, DisaggregationType, ReportingFrequency, CollectedData
from activitydb.models import Program, Sector
from django.contrib.auth.models import User
class IndicatorTestCase(TestCase):
def setUp(self):
new_program = Program.objects.create(name="testprogram")
new_program.save()
get_program = Program.objects.get(name="testprogram")
new_indicator_type = IndicatorType.objects.create(indicator_type="testtype")
new_indicator_type.save()
get_indicator_type = IndicatorType.objects.get(indicator_type="testtype")
new_disaggregation = DisaggregationType.objects.create(disaggregation_type="disagg")
new_disaggregation.save()
get_disaggregation = DisaggregationType.objects.get(disaggregation_type="disagg")
new_frequency = ReportingFrequency.objects.create(frequency="newfreq")
new_frequency.save()
get_frequency = ReportingFrequency.objects.get(frequency="newfreq")
user = User.objects.create_user('john', '[email protected]', 'johnpassword')
user.save()
get_user = User.objects.get(username='john')
new_indicator = Indicator.objects.create(name="testindicator",number="1.2.3",source="testing",
disaggregation=get_disaggregation, baseline="10",lop_target="10", reporting_frequency=get_frequency,owner=get_user)
new_indicator.save()
get_indicator = Indicator.objects.get(name="testindicator")
new_collected = CollectedData.objects.create(targeted="12",achieved="20", description="somevaluecollected", indicator=get_indicator)
new_collected.save()
def test_indicator_exists(self):
"""Check for Indicator object"""
get_indicator = Indicator.objects.get(name="testindicator")
self.assertEqual(Indicator.objects.filter(id=get_indicator.id).count(), 1)
def test_collected_exists(self):
"""Check for CollectedData object"""
get_collected = CollectedData.objects.get(description="somevaluecollected")
self.assertEqual(CollectedData.objects.filter(id=get_collected.id).count(), 1)
| gpl-2.0 | 8,561,288,061,247,046,000 | 51.545455 | 164 | 0.709775 | false |
fossdevil/Assignments | Machine Learning/Assignment3Final/ML4.py | 1 | 3746 | import numpy as np
import scipy
import matplotlib.pyplot as plt
import random
# N points in d dimensions
def generatePoints(n,d):
points = []
for i in range(0,n):
point = np.random.normal(0,1,d);
p = point**2;
den = np.sqrt(sum(p));
point = list(point/den);
points.append(point);
return points;
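# Note added for clarity: each coordinate is i.i.d. N(0,1), so the vector's
# direction is spherically symmetric; dividing by its Euclidean norm
# therefore yields points distributed uniformly on the unit (d-1)-sphere.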
def interPointDistance(points,n,d):
distMat = []
distance = 0;
for i in range(0,n):
disti = []
for j in range(0,n):
distance = np.linalg.norm(list(np.asarray(points[i])-np.asarray(points[j])));
disti.append(distance);
distMat.append(disti);
return distMat;
def projection(points,subspace,n):
projPoint = []
subspacet = np.asmatrix(subspace);
subspace = subspacet.T;
for i in range(0,n):
inv = np.linalg.inv(np.dot(subspacet,subspace));
proj = np.dot(np.dot(np.dot(subspace,inv),subspacet),points[i]);
projPoint.append(proj);
return projPoint;
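# Small sketch of the projector used above (hypothetical 3-D example, not
# part of the assignment): for the subspace spanned by e0 and e2,
# S (S^T S)^{-1} S^T reduces to S S^T and simply zeroes the e1 coordinate:
#
#   S = np.asmatrix([[1, 0, 0], [0, 0, 1]]).T
#   P = S * np.linalg.inv(S.T * S) * S.T
#   P.dot([1.0, 2.0, 3.0])   # -> matrix([[1., 0., 3.]])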
def subspaceGen(n,d):
subspace = [];
subv = np.zeros(d);
r = np.arange(0,d);
k = list(random.sample(r,n));
j = 0;
for i in range(0,n):
subv = np.zeros(d);
subv[k[j]] = 1;
j = j+1;
subspace.append(subv);
return subspace;
n = 50;
d = 200;
points50 = generatePoints(n,d);
distMat = interPointDistance(points50,n,d);
print("Please open file \"Solution4.txt\":");
filename = "Solution4.txt"
target = open(filename,'w');
target.write("The interpoint distance Matrix is as follows:\n");
for i in range(0,n):
target.write(str(distMat[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
subspaces1 = np.asmatrix(subspaceGen(1,d));
subspaces2 = np.asmatrix(subspaceGen(2,d));
subspaces3 = np.asmatrix(subspaceGen(3,d));
subspaces10 = np.asmatrix(subspaceGen(10,d));
subspaces50 = np.asmatrix(subspaceGen(50,d));
projPoint1 = projection(points50,subspaces1,n);
projPoint2 = projection(points50,subspaces2,n);
projPoint3 = projection(points50,subspaces3,n);
projPoint10 = projection(points50,subspaces10,n);
projPoint50 = projection(points50,subspaces50,n);
distMat1 = interPointDistance(projPoint1,n,d);
distMat2 = interPointDistance(projPoint2,n,d);
distMat3 = interPointDistance(projPoint3,n,d);
distMat10 = interPointDistance(projPoint10,n,d);
distMat50 = interPointDistance(projPoint50,n,d);
num = np.sqrt(1.0/200);
diff1 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat1));
num = np.sqrt(2.0/200);
diff2 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat2));
num = np.sqrt(3.0/200);
diff3 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat3));
num = np.sqrt(10.0/200);
diff10 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat10));
num = np.sqrt(50.0/200);
diff50 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat50));
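# Note on the sqrt(k/200.0) factors above: projecting a fixed unit vector in
# R^200 onto a random k-dimensional coordinate subspace scales its length by
# about sqrt(k/200) on average, so each diff matrix measures how far the
# projected inter-point distances deviate from that
# Johnson-Lindenstrauss-style prediction.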
target.write("Difference matrix is as follows:\n");
target.write("For k = 1");
target.write("\n");
for i in range(0,n):
target.write(str(diff1[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 2");
target.write("\n");
for i in range(0,n):
target.write(str(diff2[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 3");
target.write("\n");
for i in range(0,n):
target.write(str(diff3[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 10");
target.write("\n");
for i in range(0,n):
target.write(str(diff10[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 50");
target.write("\n");
for i in range(0,n):
target.write(str(diff50[i]));
target.write("\n");
target.close();
| mit | 5,465,868,872,613,046,000 | 26.544118 | 82 | 0.652429 | false |
santosfamilyfoundation/SantosGUI | application/utils/patch_multiprocess.py | 1 | 1225 |
import os
import sys
import multiprocess.forking as forking
def patch_multiprocess():
if sys.platform.startswith('win'):
# First define a modified version of Popen.
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
# Second override 'Popen' class with our modified version.
forking.Popen = _Popen
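# Usage sketch (illustrative; the call site below is hypothetical): invoke
# patch_multiprocess() once at startup, before any multiprocess workers are
# spawned, so a PyInstaller --onefile build on Windows can re-exec itself:
#
#   if __name__ == '__main__':
#       patch_multiprocess()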
| mit | -7,397,045,052,748,580,000 | 39.833333 | 81 | 0.484898 | false |
4383/street-workout-database | sport/web/commons/templatetags/common_tags.py | 1 | 3392 | __author__ = 'herve.beraud'
from datetime import datetime, timedelta
from django import template
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.timesince import timesince
from community.models import InformationMessage
from exercises.models import Category
from exercises.models import MuscleGroup
from exercises.models import Muscle
register = template.Library()
@register.inclusion_tag('common_tags/show_exercises_menu.html')
def show_exercises_menu():
categories = Category.objects.filter(active=True).count()
muscles_groups = MuscleGroup.objects.filter(active=True).count()
muscles = Muscle.objects.filter(active=True).count()
return {'categories': categories, 'muscles_group': muscles_groups, 'muscles': muscles}
@register.inclusion_tag('common_tags/image_gallery.html')
def images_gallery(images):
return {"images": images}
@register.inclusion_tag('common_tags/grid-list-gallery.html')
def grid_list_gallery(items,
display_level=True,
display_menu=True,
shortcut_menu=True,
semantic_type="exercise",
margin_bottom=False
):
return {"items": items,
"display_level": display_level,
"display_menu": display_menu,
"shortcut_menu": shortcut_menu,
"semantic_type": semantic_type,
"margin_bottom": margin_bottom
}
@register.inclusion_tag('common_tags/video_gallery.html')
def videos_gallery(videos):
return {"videos": videos}
@register.inclusion_tag('common_tags/grid-list-gallery-menu.html')
def grid_list_gallery_menu():
return {}
@register.inclusion_tag('common_tags/display_information_message.html', takes_context=True)
def display_information_message(context):
expiration_date = datetime.today() + timedelta(days=365)
    # RFC 1123 cookie dates use a 24-hour clock, hence %H rather than %I
    cookie_date_format = "%a, %d %b %Y %H:%M:%S GMT"
try:
information_message = InformationMessage.objects.filter(
active=True,
display_date__lte=datetime.now(), expiration_date__gt=datetime.now()).latest('publish_date')
request = context['request']
if information_message.display_once:
try:
already_read_information_message_id = int(request.COOKIES.get('information_message_id'))
if already_read_information_message_id == information_message.id:
information_message = None
# Cookie not found
except TypeError:
pass
except ObjectDoesNotExist:
information_message = None
return {"information_message": information_message, "expiration_date": expiration_date.strftime(cookie_date_format)}
@register.simple_tag
def current_version():
return settings.CURRENT_VERSION
@register.simple_tag
def current_revision():
return settings.CURRENT_REVISION
@register.simple_tag
def last_update_date_since():
now = datetime.now()
update = datetime.fromtimestamp(settings.LAST_UPDATE_DATE)
return timesince(update, now)
@register.simple_tag
def last_update_date():
return datetime.fromtimestamp(settings.LAST_UPDATE_DATE)
@register.simple_tag
def last_update_status():
return settings.LAST_UPDATE_STATUS
@register.simple_tag
def debugging():
return settings.DEBUG
| gpl-2.0 | 5,402,247,503,451,819,000 | 29.558559 | 120 | 0.680425 | false |
openprocurement/openprocurement.auctions.dgf | openprocurement/auctions/dgf/views/other/question.py | 1 | 3280 | # -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
apply_patch,
context_unpack,
get_now,
json_view,
opresource,
save_auction,
)
from openprocurement.auctions.core.validation import (
validate_question_data,
validate_patch_question_data,
)
from openprocurement.auctions.core.views.mixins import AuctionQuestionResource
@opresource(name='dgfOtherAssets:Auction Questions',
collection_path='/auctions/{auction_id}/questions',
path='/auctions/{auction_id}/questions/{question_id}',
auctionsprocurementMethodType="dgfOtherAssets",
description="Auction questions")
class AuctionQuestionResource(AuctionQuestionResource):
@json_view(content_type="application/json", validators=(validate_question_data,), permission='create_question')
def collection_post(self):
"""Post a question
"""
auction = self.request.validated['auction']
if auction.status != 'active.tendering' or get_now() < auction.enquiryPeriod.startDate or get_now() > auction.enquiryPeriod.endDate:
self.request.errors.add('body', 'data', 'Can add question only in enquiryPeriod')
self.request.errors.status = 403
return
question = self.request.validated['question']
if any([i.status != 'active' for i in auction.lots if i.id == question.relatedItem]):
self.request.errors.add('body', 'data', 'Can add question only in active lot status')
self.request.errors.status = 403
return
auction.questions.append(question)
if save_auction(self.request):
self.LOGGER.info('Created auction question {}'.format(question.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_question_create'}, {'question_id': question.id}))
self.request.response.status = 201
route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, question_id=question.id, _query={})
return {'data': question.serialize("view")}
@json_view(content_type="application/json", permission='edit_auction', validators=(validate_patch_question_data,))
def patch(self):
"""Post an Answer
"""
auction = self.request.validated['auction']
if auction.status != 'active.tendering':
self.request.errors.add('body', 'data', 'Can\'t update question in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
if any([i.status != 'active' for i in auction.lots if i.id == self.request.context.relatedItem]):
self.request.errors.add('body', 'data', 'Can update question only in active lot status')
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
self.LOGGER.info('Updated auction question {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_question_patch'}))
return {'data': self.request.context.serialize(auction.status)}
| apache-2.0 | -685,575,204,288,687,500 | 51.063492 | 141 | 0.651829 | false |
tommy-u/chaco | chaco/multi_line_plot.py | 1 | 16640 | """ Defines the MultiLinePlot class.
"""
from __future__ import with_statement
# Standard library imports
import warnings
from math import ceil, floor
# Major library imports
import numpy as np
from numpy import argsort, array, invert, isnan, take, transpose
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, LineStyle
from traits.api import Float, List, Str, Trait, \
Bool, Callable, Property, cached_property, Instance, Array
from traitsui.api import Item, View, ScrubberEditor, HGroup
from array_data_source import ArrayDataSource
from base import arg_find_runs, bin_search
from base_xy_plot import BaseXYPlot
class MultiLinePlot(BaseXYPlot):
""" A plot consisting of multiple lines.
The data to be plotted must come from a two-dimensional array with shape M by N
stored in a MultiArrayDataSource object. M is the number of lines to be plotted,
and N is the number of points in each line.
Constructor Parameters
----------------------
index : instance of an ArrayDataSource
These are the 'x' or abscissa coordinates.
yindex : instance of ArrayDataSource
These are the 'y' coordinates.
value : instance of a MultiArrayDataSource
Note that the `scale`, `offset` and `normalized_amplitude` attributes of the
MultiLinePlot control the projection of the traces into the (x,y)
    plot. In the simplest case, `scale=1` and `offset=0`, and `normalized_amplitude`
controls the scaling of the traces relative to their base y value.
global_min, global_max : float
The minimum and maximum values of the data in `value`. For large
arrays, computing these could take excessive time, so they must be
provided when an instance is created.
normalized_amplitude : Float
color : ColorTrait
color_func : Callable or None
If not None, this Callable overrides `color`. The argument to `color_func`
will be the integer index of the trace to be rendered. `color_func` must
return an RGBA 4-tuple.
Default: None
orientation : str
Must be 'v' or 'h' (for 'vertical' or 'horizontal', respectively). This is
the orientation of the index axis (i.e. the 'x' axis).
Default: 'h'
fast_clip : bool
If True, traces whose *base* 'y' coordinate is outside the value axis range
are not plotted, even if some of the data in the curve extends into the plot
region.
Default: False
line_width : float
Width of the plotted lines.
line_style :
The style of the trace lines in the plot.
The following are from the original LinePlot code, and are untested:
selected_color
selected_line_style
"""
# M and N appearing in the comments are as defined in the docstring.
yindex = Instance(ArrayDataSource)
# amplitude = Float(0.0)
# `scale` and `offset` provide a more general transformation, but are currently
# untested.
scale = Float(1.0)
offset = Float(0.0)
fast_clip = Bool(False)
# The color of the lines.
color = black_color_trait
# A function that returns the color of lines. Overrides `color` if not None.
color_func = Trait(None, None, Callable)
# The color to use to highlight the line when selected.
selected_color = ColorTrait("lightyellow")
# The style of the selected line.
selected_line_style = LineStyle("solid")
# The name of the key in self.metadata that holds the selection mask
metadata_name = Str("selections")
# The thickness of the line.
line_width = Float(1.0)
# The line dash style.
line_style = LineStyle
use_global_bounds = Bool(True)
# Minimum value in the `value` data source. This must be provided
# in the call to the constructor.
global_min = Float
# Maximum value in the `value` data source. This must be provided
# in the call to the constructor.
global_max = Float
# Normalized amplitude is the value exposed to the user.
normalized_amplitude = Float(-0.5)
amplitude_scale = Property(Float, depends_on=['global_min', 'global_max', 'data',
'use_global_bounds', 'yindex'])
amplitude = Property(Float, depends_on=['normalized_amplitude',
'amplitude_scale'])
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
# The projected 2D numpy array.
_trace_data = Property(Array, depends_on=['index', 'index.data_changed',
'value', 'value.data_changed', 'yindex', 'yindex.data_changed',
'amplitude', 'scale', 'offset'])
# Cached list of non-NaN arrays of (x,y) data-space points; regardless of
# self.orientation, this is always stored as (index_pt, value_pt). This is
# different from the default BaseXYPlot definition.
_cached_data_pts = List
# Cached list of non-NaN arrays of (x,y) screen-space points.
_cached_screen_pts = List
#------------------------------------------------------------------------
#
#------------------------------------------------------------------------
def trait_view(self, obj):
"""Create a minimalist View, with just the amplitude and color attributes."""
# Minimalist Traits UI View for customizing the plot: only the trace amplitude
# and line color are exposed.
view = View(
HGroup(
Item('use_global_bounds'),
# Item('normalized_amplitude'),
# Item('normalized_amplitude', editor=RangeEditor()),
Item('normalized_amplitude',
editor=ScrubberEditor(increment=0.2, hover_color=0xFFFFFF, active_color=0xA0CD9E,
border_color=0x0000FF)),
),
Item("color", label="Trace color", style="simple"),
width=480,
title="Trace Plot Line Attributes",
buttons=["OK", "Cancel"])
return view
#------------------------------------------------------------------------
#
#------------------------------------------------------------------------
# See base_xy_plot.py for these:
## def hittest(self, screen_pt, threshold=7.0):
## def interpolate(self, index_value):
def get_screen_points(self):
self._gather_points()
scrn_pts_list = [[self.map_screen(ary) for ary in line]
for line in self._cached_data_pts]
return scrn_pts_list
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
@cached_property
def _get_amplitude_scale(self):
"""
If the amplitude is set to this value, the largest trace deviation from
its base y coordinate will be equal to the y coordinate spacing.
"""
# Note: Like the rest of the current code, this ignores the `scale` attribute.
if self.yindex is not None:
coordinates = self.yindex.get_data()
else:
coordinates = []
if len(coordinates) > 1:
dy = coordinates[1] - coordinates[0]
if dy == 0:
dy = 1.0
else:
# default coordinate spacing if there is only 1 coordinate
dy = 1.0
if self.use_global_bounds:
max_abs = max(abs(self.global_min), abs(self.global_max))
else:
data = self.value._data
max_abs = np.max(np.abs(data))
if max_abs == 0:
amp_scale = 0.5 * dy
else:
amp_scale = 0.5 * dy / max_abs
return amp_scale
@cached_property
def _get_amplitude(self):
amplitude = self.normalized_amplitude * self.amplitude_scale
return amplitude
@cached_property
def _get__trace_data(self):
"""Compute the transformed data."""
# Get the array from `value`
data = self.value._data
coordinates = self.yindex.get_data()
channel_data = self.scale*(self.amplitude*data + coordinates[:,np.newaxis]) \
+ self.offset
return channel_data
def _gather_points(self):
"""
Collects the data points that are within the bounds of the plot and
caches them.
"""
if self._cache_valid:
return
if not self.index or not self.value:
return
index = self.index.get_data()
varray = self._trace_data
if varray.size == 0:
self._cached_data_pts = []
            self._cache_valid = True
return
coordinates = self.yindex.get_data()
if self.fast_clip:
coord_min = float(coordinates[0])
coord_max = coordinates[-1]
slice_min = max(0,ceil((varray.shape[0]-1)*(self.value_range.low - coord_min)/(coord_max - coord_min)))
slice_max = min(varray.shape[0], 1+floor((varray.shape[0]-1)*(self.value_range.high - coord_min)/(coord_max - coord_min)))
varray = varray[slice_min:slice_max]
# FIXME: The y coordinates must also be sliced to match varray.
# Check to see if the data is completely outside the view region.
outside = False
# Check x coordinates.
low, high = self.index.get_bounds()
if low > self.index_range.high or high < self.index_range.low:
outside = True
        # Check y coordinates. Use varray because it is based on the yindex,
# but has been shifted up or down depending on the values.
ylow, yhigh = varray.min(), varray.max()
if ylow > self.value_range.high or yhigh < self.value_range.low:
outside = True
if outside:
self._cached_data_pts = []
            self._cache_valid = True
return
if len(index) == 0 or varray.shape[0] == 0 or varray.shape[1] == 0 \
or len(index) != varray.shape[1]:
self._cached_data_pts = []
self._cache_valid = True
return
size_diff = varray.shape[1] - len(index)
if size_diff > 0:
warnings.warn('Chaco.LinePlot: value.shape[1] %d - len(index) %d = %d\n' \
% (varray.shape[1], len(index), size_diff))
index_max = len(index)
varray = varray[:,:index_max]
else:
index_max = varray.shape[1]
index = index[:index_max]
# Split the index and value raw data into non-NaN chunks.
# nan_mask is a boolean M by N array.
nan_mask = invert(isnan(varray)) & invert(isnan(index))
blocks_list = []
for nm in nan_mask:
blocks = [b for b in arg_find_runs(nm, "flat") if nm[b[0]] != 0]
blocks_list.append(blocks)
line_points = []
for k, blocks in enumerate(blocks_list):
points = []
for block in blocks:
start, end = block
block_index = index[start:end]
block_value = varray[k, start:end]
index_mask = self.index_mapper.range.mask_data(block_index)
runs = [r for r in arg_find_runs(index_mask, "flat") \
if index_mask[r[0]] != 0]
# Check to see if our data view region is between two points in the
# index data. If so, then we have to reverse map our current view
# into the appropriate index and draw the bracketing points.
if runs == []:
data_pt = self.map_data((self.x_mapper.low_pos, self.y_mapper.low_pos))
if self.index.sort_order == "none":
indices = argsort(index)
sorted_index = take(index, indices)
sorted_value = take(varray[k], indices)
sort = 1
else:
sorted_index = index
sorted_value = varray[k]
if self.index.sort_order == "ascending":
sort = 1
else:
sort = -1
ndx = bin_search(sorted_index, data_pt, sort)
if ndx == -1:
# bin_search can return -1 if data_pt is outside the bounds
# of the source data
continue
z = transpose(array((sorted_index[ndx:ndx+2],
sorted_value[ndx:ndx+2])))
points.append(z)
else:
# Expand the width of every group of points so we draw the lines
# up to their next point, outside the plot area
data_end = len(index_mask)
for run in runs:
start, end = run
if start != 0:
start -= 1
if end != data_end:
end += 1
run_data = transpose(array((block_index[start:end],
block_value[start:end])))
points.append(run_data)
line_points.append(points)
self._cached_data_pts = line_points
self._cache_valid = True
return
# See base_xy_plot.py for:
## def _downsample(self):
## def _downsample_vectorized(self):
def _render(self, gc, line_points, selected_points=None):
if len(line_points) == 0:
return
with gc:
gc.set_antialias(True)
gc.clip_to_rect(self.x, self.y, self.width, self.height)
render = self._render_normal
if selected_points is not None:
gc.set_stroke_color(self.selected_color_)
gc.set_line_width(self.line_width+10.0)
gc.set_line_dash(self.selected_line_style_)
render(gc, selected_points)
if self.color_func is not None:
# Existence of self.color_func overrides self.color.
color_func = self.color_func
else:
color_func = lambda k: self.color_
tmp = list(enumerate(line_points))
# Note: the list is reversed for testing with _render_filled.
for k, points in reversed(tmp):
color = color_func(k)
# Apply the alpha
alpha = color[-1] if len(color) == 4 else 1
color = color[:3] + (alpha * self.alpha,)
gc.set_stroke_color(color)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
render(gc, points)
# Draw the default axes, if necessary
self._draw_default_axes(gc)
def _render_normal(self, gc, points):
for ary in points:
if len(ary) > 0:
gc.begin_path()
gc.lines(ary)
gc.stroke_path()
return
def _render_icon(self, gc, x, y, width, height):
with gc:
gc.set_stroke_color(self.color_)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
gc.set_antialias(0)
gc.move_to(x, y+height/2)
gc.line_to(x+width, y+height/2)
gc.stroke_path()
def _alpha_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _color_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _line_style_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _line_width_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _amplitude_changed(self):
self.value.data_changed = True
self.invalidate_draw()
self.request_redraw()
return
def __getstate__(self):
state = super(MultiLinePlot,self).__getstate__()
for key in ['traits_view']:
            if key in state:
del state[key]
return state
| bsd-3-clause | -2,342,800,567,750,205,400 | 34.031579 | 134 | 0.534796 | false |
pylover/network-interfaces | network_interfaces/stanza.py | 1 | 4473 | # -*- coding: utf-8 -*-
import re
from .helpers import clean_list, list_hash
__author__ = 'vahid'
class Stanza(object):
_type = None
_filename = None
_headers = None
def __init__(self, filename, *headers):
self._filename = filename
self._headers = list(headers)
def __repr__(self):
return ' '.join(self._headers)
def _headers_hash(self):
result = 0
for h in self._headers:
result ^= h.__hash__()
return result
def __hash__(self):
return \
self._type.__hash__() ^ \
self._headers_hash()
@classmethod
def is_stanza(cls, s):
return re.match(r'^(iface|mapping|auto|allow-|source).*', s)
@classmethod
def subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in s.subclasses()]
@classmethod
def create(cls, header, filename):
cells = re.split('\s+', header)
cells = clean_list(cells)
stanza_type = cells[0]
subclasses = cls.subclasses()
# Checking for exact match
for subclass in subclasses:
if subclass._type and stanza_type == subclass._type:
return subclass(filename, *cells)
# Partial start match
for subclass in subclasses:
if subclass._type and stanza_type.startswith(subclass._type):
return subclass(filename, *cells)
def validate(self, allow_correction=False):
pass
class MultilineStanza(Stanza):
_items = None
def __init__(self, *args, **kwargs):
super(MultilineStanza, self).__init__(*args, **kwargs)
self._items = []
def __getattr__(self, item):
try:
return self[item]
except (KeyError, IndexError):
return super(MultilineStanza, self).__getattribute__(item)
#raise AttributeError('%s %s' % (object.__repr__(self), item))
def __setattr__(self, key, value):
if hasattr(self.__class__, key):
super(Stanza, self).__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, item):
if hasattr(self.__class__, item):
super(Stanza, self).__delattr__(item)
else:
del self[item]
def __contains__(self, item):
return self.__getitem_internal(item) is not None
def __getitem__(self, item):
if not isinstance(item, str):
raise TypeError(type(item))
result = self.__getitem_internal(item)
if not result:
raise KeyError(item)
return ' '.join(result[1:])
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError(type(key))
        values = re.split('\s+', value)
cells = self.__getitem_internal(key)
if not cells:
self.add_entry(' '.join([key] + values))
else:
del cells[1:]
cells += values
def __delitem__(self, item):
if not isinstance(item, str):
raise TypeError(type(item))
self.__delitem_internal(item)
def __repr__(self):
items = [(i[0], ' '.join(i[1:]).strip()) for i in self._items]
return '%s\n%s\n' % (
super(MultilineStanza, self).__repr__(),
'\n'.join([' %s %s' % (i[0], i[1]) for i in items if i[1]]))
def __hash__(self):
return super(MultilineStanza, self).__hash__() ^ self._items_hash()
def update(self, other):
if isinstance(other, dict):
for k, v in other.items():
self[k.replace('_', '-')] = v
else:
raise ValueError('A dict is required, but %s was passed.' % type(other))
def _items_hash(self):
result = 0
for i in self._items:
result ^= list_hash(i)
return result
def add_entry(self, l):
cells = re.split('\s+', l)
cells = clean_list(cells)
if cells and cells not in self._items:
self._items.append(cells)
def __getitem_internal(self, item):
key = item.replace('_', '-')
for i in self._items:
if i[0] == key:
return i
return None
def __delitem_internal(self, item):
key = item.replace('_', '-')
for i in self._items:
if i[0] == key:
self._items.remove(i)
return
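# Behaviour sketch (illustrative, assuming a MultilineStanza subclass such as
# an iface stanza): attribute and item access translate '_' to '-', so
# `stanza.pre_up` reads the 'pre-up' entry and `stanza['address'] = '10.0.0.2'`
# rewrites the matching option line in place.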
| gpl-3.0 | -2,740,219,951,952,730,600 | 27.673077 | 84 | 0.521127 | false |
regardscitoyens/twitter-parlementaires | download_twitter.py | 1 | 1413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, json
from twitter import Twitter, OAuth
from twitterconfig import KEY, SECRET, OAUTH_TOKEN, OAUTH_SECRET
if len(sys.argv) < 3:
sys.stderr.write("Please input both Twitter list's owner_screen_name and slug\n")
exit(1)
LIST_USER, LIST_ID = sys.argv[1:3]
if not os.path.isdir(".cache"):
os.makedirs(".cache")
t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, KEY, SECRET))
accounts = {}
page = 1
args = {
"owner_screen_name": LIST_USER,
"include_entities": "false",
"skip_status": "true",
"count": 5000,
"cursor": -1
}
try:
args["list_id"] = long(LIST_ID)
except ValueError:
args["slug"] = LIST_ID
while args["cursor"]:
res = t.lists.members(**args)
with open(os.path.join('.cache', 'twitter-%s-%s.json' % (LIST_USER, args["cursor"] if args["cursor"] != -1 else 0)), 'w') as f:
json.dump(res, f)
args["cursor"] = res.get('next_cursor', res.get('next_cursor_str', 0))
new = 0
for account in res['users']:
name = account['screen_name'].lower()
if name not in accounts:
accounts[name] = account
new += 1
print("[INFO/%s] page %s -> %s results including %s new ; new total: %s" % (LIST_ID, page, len(res['users']), new, len(accounts)))
page += 1
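# Note on the loop above (added for clarity): the list-members endpoint pages
# with an opaque cursor; -1 asks for the first page and the API returns 0 in
# next_cursor when no pages remain, which terminates the while loop.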
with open(os.path.join('.cache', 'twitter-%s.json' % LIST_ID), 'w') as f:
json.dump(accounts, f)
| agpl-3.0 | -6,752,805,189,565,542,000 | 29.717391 | 134 | 0.602265 | false |
colin2328/asciiclass | labs/lab3/worldcup_wrangler.py | 1 | 14912 | from wrangler import dw
import sys
if(len(sys.argv) < 3):
sys.exit('Error: Please include an input and output file. Example python script.py input.csv output.csv')
w = dw.DataWrangler()
# Split data repeatedly on newline into rows
w.add(dw.Split(column=["data"],
table=0,
status="active",
drop=True,
result="row",
update=False,
insert_position="right",
row=None,
on="\n",
before=None,
after=None,
ignore_between=None,
which=1,
max=0,
positions=None,
quote_character=None))
# Cut on '"'
w.add(dw.Cut(column=[],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\"",
before=None,
after=None,
ignore_between=None,
which=1,
max=0,
positions=None))
# Cut from data between ']]' and ', '
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before=", ",
after="]]",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data before '{'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before="{",
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data before '(\[\['
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before="\\(\\[\\[",
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '[\[ any number FIFA any word any word \|'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\[\\[[0-9]+ FIFA [a-zA-Z]+ [a-zA-Z]+\\|",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None))
# Cut from data on ']]'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="]]",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None))
# Cut from data between '1990' and ')'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before="\\)",
after="1990",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data after '}}'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before=None,
after="}}",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data between '1974' and ','
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before=",",
after="1974",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '{{ any word |'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="{{[a-zA-Z]+\\|",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '}}'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="}}",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete rows where data = '|-'
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Eq(column=[],
table=0,
status="active",
drop=False,
lcol="data",
value="|-",
op_str="=")])))
# Cut from data on '| any number '
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\|\\d+",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete rows where data = '|align=center| —'
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Eq(column=[],
table=0,
status="active",
drop=False,
lcol="data",
value="|align=center| —",
op_str="=")])))
# Cut from data on '('
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\(",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on ')'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\)",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete row 94
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[93])])))
# Delete row 94
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[93])])))
# Delete row 94
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[93])])))
# Cut from data on '[\[#1\|\*'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\[\\[#1\\|\\*",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '! Team !! Titles !! Runners-up !! Third place !! Fourth place !! Top 4 <br/> finishes'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="! Team !! Titles !! Runners-up !! Third place !! Fourth place !! Top 4 <br/> finishes",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None))
# Wrap empty rows
w.add(dw.Wrap(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Empty(column=[],
table=0,
status="active",
drop=False,
percent_valid=0,
num_valid=0)])))
# Fold wrap, wrap1, wrap2, wrap3... using header as a key
w.add(dw.Fold(column=["wrap","wrap1","wrap2","wrap3","wrap4","wrap5"],
table=0,
status="active",
drop=False,
keys=[-1]))
# Translate value up
w.add(dw.Translate(column=["value"],
table=0,
status="active",
drop=False,
direction="up",
values=1))
# Drop value
w.add(dw.Drop(column=["value"],
table=0,
status="active",
drop=True))
# Extract from fold between positions 4, 5
w.add(dw.Extract(column=["fold"],
table=0,
status="active",
drop=False,
result="column",
update=False,
insert_position="right",
row=None,
on=None,
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=[4,5]))
# Drop fold
w.add(dw.Drop(column=["fold"],
table=0,
status="active",
drop=True))
# Extract from translate on ' any word '
w.add(dw.Extract(column=["translate"],
table=0,
status="active",
drop=False,
result="column",
update=False,
insert_position="right",
row=None,
on="[a-zA-Z]+",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Fill extract1 with values from above
w.add(dw.Fill(column=["extract1"],
table=0,
status="active",
drop=False,
direction="down",
method="copy",
row=None))
# Delete rows where extract is null
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.IsNull(column=[],
table=0,
status="active",
drop=False,
lcol="extract",
value=None,
op_str="is null")])))
# Delete rows where extract = '5'
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Eq(column=[],
table=0,
status="active",
drop=False,
lcol="extract",
value="5",
op_str="=")])))
# Split translate repeatedly on ',' into rows
w.add(dw.Split(column=["translate"],
table=0,
status="active",
drop=True,
result="row",
update=False,
insert_position="right",
row=None,
on=",",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None,
quote_character=None))
# Cut from translate on '*'
w.add(dw.Cut(column=["translate"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\*",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete row 77
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[76])])))
w.apply_to_file(sys.argv[1]).print_csv(sys.argv[2]) | mit | -7,326,252,831,971,630,000 | 26.206204 | 107 | 0.402267 | false |
andrmuel/gr-dab | python/qa/qa_ofdm_move_and_insert_zero.py | 1 | 1294 | #!/usr/bin/env python
from gnuradio import gr, gr_unittest, blocks
import grdab
class qa_ofdm_move_and_insert_zero(gr_unittest.TestCase):
"""
@brief QA for the block that moves the signal to the middle of the band and inserts the zero carrier in the middle.
This class implements a test bench to verify the corresponding C++ class.
"""
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_ofdm_move_and_insert_zero(self):
num_carriers = 4
fft_length = 10
d_zeros_on_left = 3
src_data0 = range(0,8)
expected_result0 = [0,0,0]+[0,1]+[0]+[2,3]+[0,0]+[0,0,0]+[4,5]+[0]+[6,7]+[0,0]
expected_result0 = [complex(x) for x in expected_result0]
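        # layout per FFT vector: d_zeros_on_left zeros, the lower half of the
        # carriers, the inserted zero (DC) carrier, the upper half, then
        # right padding up to fft_length -- hence 3+2+1+2+2 = 10 per input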
src0 = blocks.vector_source_c(src_data0)
s2v0 = blocks.stream_to_vector(gr.sizeof_gr_complex, num_carriers)
ofdm_move_and_insert_zero = grdab.ofdm_move_and_insert_zero(fft_length,num_carriers)
v2s0 = blocks.vector_to_stream(gr.sizeof_gr_complex, fft_length)
dst0 = blocks.vector_sink_c()
self.tb.connect(src0, s2v0, ofdm_move_and_insert_zero, v2s0, dst0)
self.tb.run()
result_data0 = dst0.data()
# print expected_result0
# print result_data0
self.assertComplexTuplesAlmostEqual(expected_result0, result_data0, 6)
if __name__ == '__main__':
gr_unittest.main()
| gpl-3.0 | 7,527,159,771,634,715,000 | 31.35 | 116 | 0.692427 | false |
senser/xmppBot | ZenPacks/community/xmppBot/Jabber/plugins/setjid.py | 1 | 4107 | """Check if the sender is a valid zenoss admin. For access control"""
from Jabber.Plugins import Plugin
from Jabber.ZenAdapter import ZenAdapter
from Jabber.Options import Options
from optparse import OptionError
import transaction
class SetJid(Plugin):
name = 'mapuser'
capabilities = ['setjid', 'mapuser', 'help']
def call(self, args, sender, log, **kw):
log.debug('mapuser plugin running with %s' % args)
opts = self.options()
# parse the options
try:
(options, arguments) = opts.parse_args(args)
log.debug('Done parsing arguments. Options are "%s", arguments expanded to %s' % (options, arguments))
except OptionError, message:
return str(message)
if options.zenUser is None or options.jabberId is None:
return 'NO. -u and -j are both required.'
adapter = ZenAdapter()
jabberId = options.jabberId.lower()
haveUser = False
for user in adapter.userSettings():
if user.id.lower() == options.zenUser.lower():
haveUser = True
try:
currentId = user.getProperty('JabberId')
except AttributeError:
currentId = False
if currentId:
if options.jabberId == currentId.lower():
if options.force:
self.mapIds(jabberId, user)
return 'This user mapping already looks like this. Forced option was used, so I set it anyway.'
else:
return 'This user mapping already looks like this.'
if '/' in sender:
sender = sender.split('/')[0]
                    if currentId.lower() == sender.lower():
                        if options.force:
                            self.mapIds(jabberId, user)
                            return 'This is your Zenoss user id, and the mapping is already set correctly. However, the force option was used, so I set it anyway. Since this will probably break communication with me, you can change it back from the Zope interface.'
                        else:
                            return 'This is your Zenoss user id, and the mapping is already set correctly. Changing it will prevent you from communicating with me. If you really want to change it, do so from the Zenoss interface or -f.'
log.debug('Setting the jabberid mapping property to %s for zenuser %s' % (jabberId, user))
self.mapIds(jabberId, user)
break
if haveUser:
return 'JabberId for this user has been saved. Thanks.'
else:
return 'Sorry! I Could not find a Zenoss user by the name %s' % options.zenUser
def mapIds(self, jabberId, zenUser):
self.setPropertyIfNeeded(zenUser)
zenUser._updateProperty('JabberId', jabberId)
transaction.commit()
def setPropertyIfNeeded(self, zenUser):
        if not zenUser.hasProperty('JabberId'):
            # manage_addProperty both creates and sets the property; calling
            # _setProperty again on an existing id raises a Zope error
            zenUser.manage_addProperty('JabberId', '', 'string')
try:
zenUser.getProperty('JabberId')
except AttributeError:
zenUser.manage_addProperty('JabberId', '', 'string')
# unnecessary?
#zenUser._setProperty('JabberId', '', 'string')
def private(self):
return False
def options(self):
parser = Options(description = 'Acknowledge events by eventid', prog = 'ack')
parser.add_option('-u', '--user', dest='zenUser', help='Zenoss username (must already exist in zenoss).')
parser.add_option('-j', '--jid', dest='jabberId', help='JabberID to map to the zenoss user.')
parser.add_option('-f', '--force', dest='force', action='store_true', help='Force association even if it could disallow your own user. USE WITH CAUTION.')
return parser
def help(self):
opts = self.options()
return str(opts.help())
| gpl-2.0 | -3,075,416,412,988,753,400 | 45.146067 | 267 | 0.582177 | false |
shashisp/blumix-webpy | app/applications/welcome/controllers/default.py | 1 | 1858 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
# response.flash = T("Hello World")
return dict(message="Hello from MyApp")
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/manage_users (requires membership in
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
| mit | -1,518,025,967,072,586,500 | 30.491525 | 79 | 0.607643 | false |
lrowe/splinter | tests/test_webdriver_phantomjs.py | 1 | 2438 | import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
class PhantomJSBrowserTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("phantomjs")
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_get_alert(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_right_click(self):
with self.assertRaises(NotImplementedError):
self.browser.find_by_id('visible').right_click()
def test_double_click(self):
with self.assertRaises(NotImplementedError):
self.browser.find_by_id('visible').double_click()
def test_access_prompts_and_be_able_to_fill_then(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_confirm_and_accept_and_dismiss_them_using_with(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_confirm_and_accept_and_dismiss_them(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_alerts_using_with(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_alerts_and_accept_them(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_can_work_on_popups(self):
# FIXME: Check https://github.com/detro/ghostdriver/issues/180 to see if
# we can implement this test
pass
class PhantomJSBrowserTestWithCustomHeaders(unittest.TestCase):
@classmethod
def setUpClass(cls):
custom_headers = {'X-Splinter-Customheaders-1': 'Hello',
'X-Splinter-Customheaders-2': 'Bye'}
cls.browser = Browser("phantomjs", custom_headers=custom_headers)
def test_create_a_phantomjs_with_custom_headers(self):
self.browser.visit(EXAMPLE_APP + 'headers')
self.assertTrue(
self.browser.is_text_present('X-Splinter-Customheaders-1: Hello'))
self.assertTrue(
self.browser.is_text_present('X-Splinter-Customheaders-2: Bye'))
@classmethod
def tearDownClass(cls):
cls.browser.quit()
| bsd-3-clause | 176,557,959,462,295,600 | 31.506667 | 80 | 0.66653 | false |
daymer/xWIKI_Karma | CustomModules/mysql-connector-python-2.1.7/lib/cpy_distutils.py | 1 | 24414 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the DistUtils command 'build_ext'
"""
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.errors import DistutilsExecError
from distutils.util import get_platform
from distutils.dir_util import copy_tree
from distutils import log
from glob import glob
import os
import shlex
import struct
from subprocess import Popen, PIPE, STDOUT, check_call
import sys
import platform
import shutil
ARCH_64BIT = sys.maxsize > 2**32 # Works with Python 2.6 and greater
py_arch = '64-bit' if ARCH_64BIT else '32-bit'
CEXT_OPTIONS = [
('with-mysql-capi=', None,
"Location of MySQL C API installation or path to mysql_config"),
('extra-compile-args=', None,
"Extra compile args"),
('extra-link-args=', None,
"Extra link args")
]
CEXT_STATIC_OPTIONS = [
('static', None,
"Link C libraries statically with the C Extension"),
]
INSTALL_OPTIONS = [
('byte-code-only=', None,
"Remove Python .py files; leave byte code .pyc only"),
]
def win_dll_is64bit(dll_file):
"""Check if a Windows DLL is 64 bit or not
Returns True if the library dll_file is 64bit.
Raises ValueError when magic of header is invalid.
Raises IOError when file could not be read.
    Raises OSError when executed on a non-Windows platform.
Returns True or False.
"""
if os.name != 'nt':
raise OSError("win_ddl_is64bit only useful on Windows")
with open(dll_file, 'rb') as fp:
# IMAGE_DOS_HEADER
e_magic = fp.read(2)
if e_magic != b'MZ':
raise ValueError("Wrong magic in header")
fp.seek(60)
offset = struct.unpack("I", fp.read(4))[0]
# IMAGE_FILE_HEADER
fp.seek(offset)
file_header = fp.read(6)
(signature, machine) = struct.unpack("<4sH", file_header)
if machine == 0x014c: # IMAGE_FILE_MACHINE_I386
return False
        elif machine in (0x8664, 0x0200): # IMAGE_FILE_MACHINE_AMD64/IA64
            return True
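# e.g. win_dll_is64bit(r'C:\mysql\lib\libmysql.dll') -> True for a 64-bit
# client library (the path is hypothetical)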
def unix_lib_is64bit(lib_file):
"""Check if a library on UNIX is 64 bit or not
This function uses the `file` command to check if a library on
UNIX-like platforms is 32 or 64 bit.
Returns True if the library is 64bit.
Raises ValueError when magic of header is invalid.
Raises IOError when file could not be read.
    Raises OSError when executed on a non-POSIX platform.
Returns True or False.
"""
if os.name != 'posix':
raise OSError("unix_lib_is64bit only useful on UNIX-like systems")
    if os.path.isdir(lib_file):
mysqlclient_libs = []
for root, _, files in os.walk(lib_file):
for filename in files:
filepath = os.path.join(root, filename)
if filename.startswith('libmysqlclient') and \
not os.path.islink(filepath) and \
'_r' not in filename and \
'.a' not in filename:
mysqlclient_libs.append(filepath)
if mysqlclient_libs:
break
# give priority to .so files instead of .a
mysqlclient_libs.sort()
lib_file = mysqlclient_libs[-1]
log.debug("# Using file command to test lib_file {0}".format(lib_file))
    # `file -L` follows symlinks; the same invocation works on SunOS as well
    cmd_list = ['file', '-L', lib_file]
    # universal_newlines keeps stdout as text on both Python 2 and 3
    prc = Popen(cmd_list, stdin=PIPE, stderr=STDOUT,
                stdout=PIPE, universal_newlines=True)
stdout = prc.communicate()[0]
stdout = stdout.split(':')[1]
log.debug("# lib_file {0} stdout: {1}".format(lib_file, stdout))
if 'x86_64' in stdout or 'x86-64' in stdout or '32-bit' not in stdout:
return True
return False
def parse_mysql_config_info(options, stdout):
log.debug("# stdout: {0}".format(stdout))
info = {}
for option, line in zip(options, stdout.split('\n')):
log.debug("# option: {0}".format(option))
log.debug("# line: {0}".format(line))
info[option] = line.strip()
ver = info['version']
if '-' in ver:
ver, _ = ver.split('-', 2)
info['version'] = tuple([int(v) for v in ver.split('.')[0:3]])
libs = shlex.split(info['libs'])
info['lib_dir'] = libs[0].replace('-L', '')
info['libs'] = [ lib.replace('-l', '') for lib in libs[1:] ]
if platform.uname()[0] == 'SunOS':
info['lib_dir'] = info['lib_dir'].replace('-R', '')
info['libs'] = [lib.replace('-R', '') for lib in info['libs']]
log.debug("# info['libs']: ")
for lib in info['libs']:
log.debug("# {0}".format(lib))
libs = shlex.split(info['libs_r'])
info['lib_r_dir'] = libs[0].replace('-L', '')
info['libs_r'] = [ lib.replace('-l', '') for lib in libs[1:] ]
info['include'] = [x.strip() for x in info['include'].split('-I')[1:]]
return info
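# Illustrative round-trip (hypothetical mysql_config output, not from the
# real tool):
#
#   opts = ['cflags', 'include', 'libs', 'libs_r', 'plugindir', 'version']
#   out = ("-I/usr/include/mysql\n-I/usr/include/mysql\n"
#          "-L/usr/lib64/mysql -lmysqlclient\n"
#          "-L/usr/lib64/mysql -lmysqlclient_r\n"
#          "/usr/lib64/mysql/plugin\n5.7.18\n")
#   info = parse_mysql_config_info(opts, out)
#   # info['version'] -> (5, 7, 18); info['libs'] -> ['mysqlclient']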
def get_mysql_config_info(mysql_config):
"""Get MySQL information using mysql_config tool
Returns a dict.
"""
options = ['cflags', 'include', 'libs', 'libs_r', 'plugindir', 'version']
cmd = [mysql_config] + [ "--{0}".format(opt) for opt in options ]
try:
proc = Popen(cmd, stdout=PIPE, universal_newlines=True)
stdout, _ = proc.communicate()
except OSError as exc:
raise DistutilsExecError("Failed executing mysql_config: {0}".format(
str(exc)))
info = parse_mysql_config_info(options, stdout)
# Try to figure out the architecture
info['arch'] = None
if os.name == 'posix':
if platform.uname()[0] == 'SunOS':
print("info['lib_dir']: {0}".format(info['lib_dir']))
print("info['libs'][0]: {0}".format(info['libs'][0]))
pathname = os.path.abspath(os.path.join(info['lib_dir'],
'lib',
info['libs'][0])) + '/*'
else:
pathname = os.path.join(info['lib_dir'],
'lib' + info['libs'][0]) + '*'
print("# Looking mysqlclient_lib at path: {0}".format(pathname))
log.debug("# searching mysqlclient_lib at: %s", pathname)
libs = glob(pathname)
mysqlclient_libs = []
for filepath in libs:
_, filename = os.path.split(filepath)
log.debug("# filename {0}".format(filename))
if filename.startswith('libmysqlclient') and \
not os.path.islink(filepath) and \
'_r' not in filename and \
'.a' not in filename:
mysqlclient_libs.append(filepath)
mysqlclient_libs.sort()
stdout = None
try:
log.debug("# mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
for mysqlclient_lib in mysqlclient_libs:
log.debug("#+ {0}".format(mysqlclient_lib))
log.debug("# tested mysqlclient_lib[-1]: "
"{0}".format(mysqlclient_libs[-1]))
if platform.uname()[0] == 'SunOS':
print("mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
cmd_list = ['file', mysqlclient_libs[-1]]
else:
cmd_list = ['file', '-L', mysqlclient_libs[-1]]
proc = Popen(cmd_list, stdout=PIPE,
universal_newlines=True)
stdout, _ = proc.communicate()
stdout = stdout.split(':')[1]
except OSError as exc:
raise DistutilsExecError(
"Although the system seems POSIX, the file-command could not "
"be executed: {0}".format(str(exc)))
if stdout:
if '64' in stdout:
info['arch'] = "x86_64"
else:
info['arch'] = "i386"
else:
raise DistutilsExecError(
"Failed getting out put from the file-command"
)
else:
raise DistutilsExecError(
"Cannot determine architecture on {0} systems".format(os.name))
return info
def remove_cext(distribution):
"""Remove the C Extension from the distribution
This function can be useful in Distutils commands for creating
pure Python modules.
"""
to_remove = []
for ext_mod in distribution.ext_modules:
if ext_mod.name == '_mysql_connector':
to_remove.append(ext_mod)
for ext_mod in to_remove:
distribution.ext_modules.remove(ext_mod)
class BuildExtDynamic(build_ext):
"""Build Connector/Python C Extension"""
description = "build Connector/Python C Extension"
user_options = build_ext.user_options + CEXT_OPTIONS
min_connector_c_version = None
arch = None
_mysql_config_info = None
def initialize_options(self):
build_ext.initialize_options(self)
self.extra_compile_args = None
self.extra_link_args = None
self.with_mysql_capi = None
def _finalize_connector_c(self, connc_loc):
"""Finalize the --with-connector-c command line argument
"""
platform = get_platform()
self._mysql_config_info = None
min_version = BuildExtDynamic.min_connector_c_version
err_invalid_loc = "MySQL C API location is invalid; was %s"
mysql_config = None
err_version = "MySQL C API {0}.{1}.{2} or later required".format(
*BuildExtDynamic.min_connector_c_version)
if not os.path.exists(connc_loc):
log.error(err_invalid_loc, connc_loc)
sys.exit(1)
if os.path.isdir(connc_loc):
            # if it's a directory and no mysql_config is available, figure out
            # the lib/ and include/ folders from the filesystem
mysql_config = os.path.join(connc_loc, 'bin', 'mysql_config')
if os.path.isfile(mysql_config) and \
os.access(mysql_config, os.X_OK):
connc_loc = mysql_config
log.debug("# connc_loc: {0}".format(connc_loc))
else:
# Probably using MS Windows
myconfigh = os.path.join(connc_loc, 'include', 'my_config.h')
if not os.path.exists(myconfigh):
log.error("MySQL C API installation invalid "
"(my_config.h not found)")
sys.exit(1)
else:
with open(myconfigh, 'rb') as fp:
for line in fp.readlines():
if b'#define VERSION' in line:
version = tuple([
int(v) for v in
line.split()[2].replace(
b'"', b'').split(b'.')
])
if version < min_version:
                                    log.error(err_version)
sys.exit(1)
break
# On Windows we check libmysql.dll
if os.name == 'nt':
lib = os.path.join(self.with_mysql_capi, 'lib',
'libmysql.dll')
connc_64bit = win_dll_is64bit(lib)
# On OSX we check libmysqlclient.dylib
elif 'macos' in platform:
lib = os.path.join(self.with_mysql_capi, 'lib',
'libmysqlclient.dylib')
connc_64bit = unix_lib_is64bit(lib)
# On other Unices we check libmysqlclient (follow symlinks)
elif os.name == 'posix':
connc_64bit = unix_lib_is64bit(connc_loc)
else:
raise OSError("Unsupported platform: %s" % os.name)
include_dirs = [os.path.join(connc_loc, 'include')]
if os.name == 'nt':
libraries = ['libmysql']
else:
libraries = ['-lmysqlclient']
library_dirs = os.path.join(connc_loc, 'lib')
log.debug("# connc_64bit: {0}".format(connc_64bit))
if connc_64bit:
self.arch = 'x86_64'
else:
self.arch = 'i386'
# We were given the location of the mysql_config tool (not on Windows)
if not os.name == 'nt' and os.path.isfile(connc_loc) \
and os.access(connc_loc, os.X_OK):
mysql_config = connc_loc
# Check mysql_config
myc_info = get_mysql_config_info(mysql_config)
log.debug("# myc_info: {0}".format(myc_info))
if myc_info['version'] < min_version:
log.error(err_version)
sys.exit(1)
include_dirs = myc_info['include']
libraries = myc_info['libs']
library_dirs = myc_info['lib_dir']
self._mysql_config_info = myc_info
self.arch = self._mysql_config_info['arch']
connc_64bit = self.arch == 'x86_64'
for include_dir in include_dirs:
if not os.path.exists(include_dir):
log.error(err_invalid_loc, connc_loc)
sys.exit(1)
# Set up the build_ext class
self.include_dirs.extend(include_dirs)
self.libraries.extend(libraries)
self.library_dirs.append(library_dirs)
        # We try to offer a nice message when the architecture of Python
        # does not match that of the MySQL Connector/C binaries.
print("# self.arch: {0}".format(self.arch))
if ARCH_64BIT != connc_64bit:
log.error("Python is {0}, but does not "
"match MySQL C API {1} architecture, "
"type: {2}"
"".format(py_arch,
'64-bit' if connc_64bit else '32-bit',
self.arch))
sys.exit(1)
def finalize_options(self):
self.set_undefined_options(
'install',
('extra_compile_args', 'extra_compile_args'),
('extra_link_args', 'extra_link_args'),
('with_mysql_capi', 'with_mysql_capi'))
build_ext.finalize_options(self)
print("# Python architecture: {0}".format(py_arch))
print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
if self.with_mysql_capi:
self._finalize_connector_c(self.with_mysql_capi)
def fix_compiler(self):
platform = get_platform()
cc = self.compiler
if not cc:
return
if 'macosx-10.9' in platform:
for needle in ['-mno-fused-madd']:
try:
cc.compiler.remove(needle)
cc.compiler_so.remove(needle)
except ValueError:
# We are removing, so OK when needle not there
pass
for name, args in cc.__dict__.items():
if not args or not isinstance(args, list):
continue
new_args = []
enum_args = enumerate(args)
for i, arg in enum_args:
if arg == '-arch':
                    # Skip architectures that are not needed
if args[i+1] != self.arch:
next(enum_args)
else:
new_args.append(arg)
else:
new_args.append(arg)
try:
                setattr(cc, name, new_args)
except AttributeError:
# Old class
cc.__dict__[name] = new_args
# Add system headers to Extensions extra_compile_args
        sysheaders = ['-isystem' + include_dir for include_dir in cc.include_dirs]
for ext in self.extensions:
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
# Add system headers
for sysheader in sysheaders:
if sysheader not in ext.extra_compile_args:
ext.extra_compile_args.append(sysheader)
# Stop warnings about unknown pragma
if os.name != 'nt':
ext.extra_compile_args.append('-Wno-unknown-pragmas')
def run(self):
"""Run the command"""
if os.name == 'nt':
for ext in self.extensions:
# Use the multithread, static version of the run-time library
ext.extra_compile_args.append("/MT")
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
build_ext.run(self)
else:
self.real_build_extensions = self.build_extensions
self.build_extensions = lambda: None
build_ext.run(self)
self.fix_compiler()
self.real_build_extensions()
class BuildExtStatic(BuildExtDynamic):
"""Build and Link libraries statically with the C Extensions"""
user_options = build_ext.user_options + CEXT_OPTIONS
def finalize_options(self):
install_obj = self.distribution.get_command_obj('install')
install_obj.with_mysql_capi = self.with_mysql_capi
install_obj.extra_compile_args = self.extra_compile_args
install_obj.extra_link_args = self.extra_link_args
install_obj.static = True
options_pairs = []
if not self.extra_compile_args:
options_pairs.append(('extra_compile_args', 'extra_compile_args'))
if not self.extra_link_args:
options_pairs.append(('extra_link_args', 'extra_link_args'))
if not self.with_mysql_capi:
options_pairs.append(('with_mysql_capi', 'with_mysql_capi'))
if options_pairs:
self.set_undefined_options('install', *options_pairs)
build_ext.finalize_options(self)
print("# Python architecture: {0}".format(py_arch))
print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
self.connc_lib = os.path.join(self.build_temp, 'connc', 'lib')
self.connc_include = os.path.join(self.build_temp, 'connc', 'include')
if self.with_mysql_capi:
self._finalize_connector_c(self.with_mysql_capi)
def _finalize_connector_c(self, connc_loc):
if not os.path.isdir(connc_loc):
log.error("MySQL C API should be a directory")
sys.exit(1)
log.info("Copying MySQL libraries")
copy_tree(os.path.join(connc_loc, 'lib'), self.connc_lib)
log.info("Copying MySQL header files")
copy_tree(os.path.join(connc_loc, 'include'), self.connc_include)
# Remove all but static libraries to force static linking
if os.name == 'posix':
log.info("Removing non-static MySQL libraries from %s" % self.connc_lib)
for lib_file in os.listdir(self.connc_lib):
lib_file_path = os.path.join(self.connc_lib, lib_file)
if os.path.isfile(lib_file_path) and not lib_file.endswith('.a'):
os.unlink(os.path.join(self.connc_lib, lib_file))
def fix_compiler(self):
BuildExtDynamic.fix_compiler(self)
include_dirs = []
library_dirs = []
libraries = []
if os.name == 'posix':
include_dirs.append(self.connc_include)
library_dirs.append(self.connc_lib)
if self.with_mysql_capi:
libraries.append("mysqlclient")
# As we statically link and the "libmysqlclient.a" library
# carry no information what it depends on, we need to
# manually add library dependencies here.
if platform.system() not in ["Darwin", "Windows"]:
libraries.append("rt")
for ext in self.extensions:
ext.include_dirs.extend(include_dirs)
ext.library_dirs.extend(library_dirs)
ext.libraries.extend(libraries)
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
class InstallLib(install_lib):
user_options = install_lib.user_options + CEXT_OPTIONS + INSTALL_OPTIONS
boolean_options = ['byte-code-only']
def initialize_options(self):
install_lib.initialize_options(self)
self.byte_code_only = None
def finalize_options(self):
install_lib.finalize_options(self)
self.set_undefined_options('install',
('byte_code_only', 'byte_code_only'))
self.set_undefined_options('build', ('build_base', 'build_dir'))
def run(self):
self.build()
outfiles = self.install()
# (Optionally) compile .py to .pyc
if outfiles is not None and self.distribution.has_pure_modules():
self.byte_compile(outfiles)
if self.byte_code_only:
for source_file in outfiles:
if os.path.join('mysql', '__init__.py') in source_file:
continue
log.info("Removing %s", source_file)
os.remove(source_file)
class Install(install):
"""Install Connector/Python C Extension"""
description = "install MySQL Connector/Python"
user_options = install.user_options + CEXT_OPTIONS + INSTALL_OPTIONS + \
CEXT_STATIC_OPTIONS
boolean_options = ['byte-code-only', 'static']
need_ext = False
def initialize_options(self):
install.initialize_options(self)
self.extra_compile_args = None
self.extra_link_args = None
self.with_mysql_capi = None
self.byte_code_only = None
self.static = None
def finalize_options(self):
if self.static:
log.info("Linking C Extension statically with libraries")
self.distribution.cmdclass['build_ext'] = BuildExtStatic
if self.byte_code_only is None:
self.byte_code_only = False
build_ext_obj = self.distribution.get_command_obj('build_ext')
build_ext_obj.with_mysql_capi = self.with_mysql_capi
build_ext_obj.extra_compile_args = self.extra_compile_args
build_ext_obj.extra_link_args = self.extra_link_args
build_ext_obj.static = self.static
if self.with_mysql_capi:
self.need_ext = True
if not self.need_ext:
remove_cext(self.distribution)
install.finalize_options(self)
def run(self):
if not self.need_ext:
log.info("Not Installing MySQL C Extension")
else:
log.info("Installing MySQL C Extension")
install.run(self)
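# Illustrative command-line sketch (not part of the original module), showing
# how the custom commands above are typically driven; the MySQL path is
# hypothetical:
#
#   python setup.py install --with-mysql-capi=/usr/local/mysql
#   python setup.py install --static --with-mysql-capi=/usr/local/mysql \
#       --byte-code-only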
| apache-2.0 | -3,967,664,341,499,863,600 | 35.712782 | 84 | 0.560908 | false |
thorwhalen/ut | ml/skwrap/feature_extraction/dict_vectorizer.py | 1 | 7588 |
__author__ = 'thor'
from sklearn.feature_extraction import DictVectorizer
from sklearn.externals import six
import numpy as np
from pandas import DataFrame
from collections import Counter
class IterDictVectorizer(DictVectorizer):
"""Transforms lists of feature-value mappings or rows of a dataframe to vectors.
It is like DictVectorizer (whose description was copied below), but:
(1) works with pandas DataFrame X input (rows become feature-value mappings dict)
(2) a minimum number of feature=value counts can be specified (by min_count)
(3) The fit is faster than with DictVectorizer (at least with DataFrame input)
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
    min_count: positive float or int:
        If min_count >= 1, min_count is the minimum number of occurrences of a feature=value pair.
        If min_count < 1, min_count represents the minimum proportion of rows that must have the feature=value pair.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
>>> from ut.ml.skwrap.feature_extraction import IterDictVectorizer
>>> from pandas import DataFrame
>>> v = IterDictVectorizer(sparse=False)
>>> D = DataFrame([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
self.min_count = min_count
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
feature_template = "{}" + self.separator + "{}"
if isinstance(X, DataFrame):
counts_of = dict()
for col, val in X.items():
counts_of[col] = Counter(val.dropna())
self.feature_counts_ = {}
            _min_count = self.min_count
            if _min_count < 1:
                _min_count *= len(X)
self.df_columns_ = set()
for k, v in counts_of.items():
for kk, vv in v.items():
if vv >= _min_count:
self.feature_counts_[feature_template.format(k, kk)] = vv
self.df_columns_.add(k)
feature_names = list(self.feature_counts_.keys())
else:
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = feature_template.format(f, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def transform(self, X, y=None):
if isinstance(X, DataFrame):
X = map(lambda x: x[1].dropna().to_dict(), X.iterrows())
return super(IterDictVectorizer, self).transform(X)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
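# Illustrative sketch (not part of the original module) of the min_count
# behaviour documented in the class docstring above.
def _demo_min_count():
    """Illustrative sketch only, not part of the original module.
    With min_count=2, a feature=value pair seen only once ('fruit=kiwi')
    is dropped from the learned vocabulary.
    """
    D = DataFrame([{'fruit': 'pear'}, {'fruit': 'pear'}, {'fruit': 'kiwi'}])
    v = IterDictVectorizer(sparse=False, min_count=2)
    v.fit(D)
    assert v.feature_names_ == ['fruit=pear']
    return v.vocabulary_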
class IterDictVectorizerWithText(object):
def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0,
text_vectorizers={}):
self.dict_vectorizer = IterDictVectorizer(
dtype=dtype, separator=separator, sparse=sparse, sort=sort, min_count=min_count
)
self.text_vectorizers = text_vectorizers
def fit(self, X, y=None):
# input validation
assert isinstance(X, DataFrame), "X must be a pandas DataFrame"
        if not set(self.text_vectorizers.keys()).issubset(X.columns):
            raise RuntimeError(
                "The following columns were specified in text_vectorizers, "
                "but were not in X:\n"
                " {}".format(set(self.text_vectorizers.keys()).difference(X.columns)))
# carry out the normal IterDictVectorizer.fit() for columns not in text_vectorizers
self.dict_vectorizer_cols_ = set(X.columns).difference(list(self.text_vectorizers.keys()))
self.dict_vectorizer.fit(X[self.dict_vectorizer_cols_])
self.vocabulary_ = self.dict_vectorizer.vocabulary_
# use the CounterVectorizers of text_vectorizers to fit the specified string columns
for col in set(X.columns).intersection(list(self.text_vectorizers.keys())):
self.text_vectorizers[col].fit(X[col])
offset = len(self.vocabulary_)
            self.vocabulary_ = dict(self.vocabulary_,
                                    **{k: v + offset for k, v in
                                       self.text_vectorizers[col].vocabulary_.items()})
self.feature_names_ = list(self.vocabulary_.keys())
def transform(self, X, y=None):
X1 = self.dict_vectorizer.transform(X[self.dict_vectorizer_cols_])
        X2 = np.hstack([self.text_vectorizers[col].transform(X[col])
                        for col in self.text_vectorizers])
return np.hstack((X1, X2))
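# Illustrative usage sketch (not part of the original module); assumes a
# DataFrame with categorical columns plus a free-text 'notes' column, the
# latter handled by a scikit-learn CountVectorizer (hypothetical setup):
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   v = IterDictVectorizerWithText(text_vectorizers={'notes': CountVectorizer()})
#   v.fit(df)
#   X = v.transform(df)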
| mit | 8,670,410,577,151,383,000 | 37.912821 | 123 | 0.599895 | false |
pebble/spacel-provision | src/test/provision/orbit/test_provider.py | 1 | 1145 | from mock import MagicMock
from spacel.provision.orbit.provider import ProviderOrbitFactory
from test import BaseSpaceAppTest, ORBIT_REGION
TEST_PROVIDER = 'test'
class TestProviderOrbitFactory(BaseSpaceAppTest):
def setUp(self):
super(TestProviderOrbitFactory, self).setUp()
self.provider = MagicMock()
self.orbit_factory = ProviderOrbitFactory({
TEST_PROVIDER: self.provider
})
self.orbit.regions[ORBIT_REGION].provider = TEST_PROVIDER
def test_get_orbit(self):
self.orbit_factory.orbit(self.orbit)
self.provider.orbit.assert_called_once_with(self.orbit,
regions=[ORBIT_REGION])
def test_get_orbit_provider_not_found(self):
self.orbit.regions[ORBIT_REGION].provider = 'does-not-exist'
self.orbit_factory.orbit(self.orbit)
self.provider.orbit.assert_not_called()
def test_get(self):
orbit_factory = ProviderOrbitFactory.get(None, None, None, None, None,
None)
self.assertEqual(2, len(orbit_factory._providers))
| mit | 3,872,497,890,873,925,000 | 34.78125 | 78 | 0.638428 | false |
thebenwaters/openclickio | core/urls.py | 1 | 1643 | from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import StudentListLectureView, InstructorListLectureView, RegisteredLectureCreateView, CreateLectureView, \
LectureDetailView, LectureListView, UserRegisteredLectureView, UserRegisteredLectureApproveView, \
CreateAttendanceView, RosterDetailView, RegistrationDeleteView, StudentLectureDetailView
urlpatterns = patterns('',
url(r'^instructor/$', InstructorListLectureView.as_view(), name='instructor-class-list'),
url(r'^student/$', StudentListLectureView.as_view(), name='student-class-list'),
url(r'^lecture/register/$',RegisteredLectureCreateView.as_view(), name='register-lecture'),
url(r'^lecture/registration/(?P<pk>\d+)/delete/$', RegistrationDeleteView.as_view(), name='delete-registration'),
url(r'^lecture/pending/(?P<pk>\d+)/(?P<approved>[-_\w]+)/$', UserRegisteredLectureApproveView.as_view(), name='approve-deny-lecture'),
url(r'^lecture/create/$', CreateLectureView.as_view(), name='create-lecture'),
url(r'^lecture/(?P<pk>[-_\w]+)/$', LectureDetailView.as_view(), name='detail-lecture'),
url(r'^lecture/(?P<pk>[-_\w]+)/student/$', StudentLectureDetailView.as_view(), name='student-detail-lecture'),
url(r'^lecture/(?P<pk>[-_\w]+)/create/attendance/$', CreateAttendanceView.as_view() , name='create-attendance'),
url(r'^lecture/(?P<pk>[-_\w]+)/roster/$', RosterDetailView.as_view() , name='roster-detail'),
url(r'^lectures/$', LectureListView.as_view(), name='list-all-lecture'),
url(r'^lectures/user/$', UserRegisteredLectureView.as_view(), name='users-registered-classes'),
)
| mit | 5,296,458,793,922,725,000 | 81.15 | 138 | 0.725502 | false |
tensorflow/addons | tensorflow_addons/image/dense_image_warp.py | 1 | 10213 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using per-pixel flow vectors."""
import tensorflow as tf
from tensorflow_addons.utils import types
from typing import Optional
@tf.function
def interpolate_bilinear(
grid: types.TensorLike,
query_points: types.TensorLike,
indexing: str = "ij",
name: Optional[str] = None,
) -> tf.Tensor:
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
query_points: a 3-D float `Tensor` of N points with shape
`[batch, N, 2]`.
indexing: whether the query points are specified as row and column (ij),
or Cartesian coordinates (xy).
name: a name for the operation (optional).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
ValueError: if the indexing mode is invalid, or if the shape of the
inputs invalid.
"""
if indexing != "ij" and indexing != "xy":
raise ValueError("Indexing mode must be 'ij' or 'xy'")
with tf.name_scope(name or "interpolate_bilinear"):
grid = tf.convert_to_tensor(grid)
query_points = tf.convert_to_tensor(query_points)
# grid shape checks
grid_static_shape = grid.shape
grid_shape = tf.shape(grid)
if grid_static_shape.dims is not None:
if len(grid_static_shape) != 4:
raise ValueError("Grid must be 4D Tensor")
if grid_static_shape[1] is not None and grid_static_shape[1] < 2:
raise ValueError("Grid height must be at least 2.")
if grid_static_shape[2] is not None and grid_static_shape[2] < 2:
raise ValueError("Grid width must be at least 2.")
# query_points shape checks
query_static_shape = query_points.shape
query_shape = tf.shape(query_points)
if query_static_shape.dims is not None:
if len(query_static_shape) != 3:
raise ValueError("Query points must be 3 dimensional.")
query_hw = query_static_shape[2]
if query_hw is not None and query_hw != 2:
raise ValueError("Query points last dimension must be 2.")
batch_size, height, width, channels = (
grid_shape[0],
grid_shape[1],
grid_shape[2],
grid_shape[3],
)
num_queries = query_shape[1]
query_type = query_points.dtype
grid_type = grid.dtype
alphas = []
floors = []
ceils = []
index_order = [0, 1] if indexing == "ij" else [1, 0]
unstacked_query_points = tf.unstack(query_points, axis=2, num=2)
for i, dim in enumerate(index_order):
with tf.name_scope("dim-" + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = grid_shape[i + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = tf.cast(size_in_indexing_dimension - 2, query_type)
min_floor = tf.constant(0.0, dtype=query_type)
floor = tf.math.minimum(
tf.math.maximum(min_floor, tf.math.floor(queries)), max_floor
)
int_floor = tf.cast(floor, tf.dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = tf.cast(queries - floor, grid_type)
min_alpha = tf.constant(0.0, dtype=grid_type)
max_alpha = tf.constant(1.0, dtype=grid_type)
alpha = tf.math.minimum(tf.math.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = tf.expand_dims(alpha, 2)
alphas.append(alpha)
flattened_grid = tf.reshape(grid, [batch_size * height * width, channels])
batch_offsets = tf.reshape(
tf.range(batch_size) * height * width, [batch_size, 1]
)
# This wraps tf.gather. We reshape the image data such that the
# batch, y, and x coordinates are pulled into the first dimension.
# Then we gather. Finally, we reshape the output back. It's possible this
# code would be made simpler by using tf.gather_nd.
def gather(y_coords, x_coords, name):
with tf.name_scope("gather-" + name):
linear_coordinates = batch_offsets + y_coords * width + x_coords
gathered_values = tf.gather(flattened_grid, linear_coordinates)
return tf.reshape(gathered_values, [batch_size, num_queries, channels])
# grab the pixel values in the 4 corners around each query point
top_left = gather(floors[0], floors[1], "top_left")
top_right = gather(floors[0], ceils[1], "top_right")
bottom_left = gather(ceils[0], floors[1], "bottom_left")
bottom_right = gather(ceils[0], ceils[1], "bottom_right")
# now, do the actual interpolation
with tf.name_scope("interpolate"):
interp_top = alphas[1] * (top_right - top_left) + top_left
interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
interp = alphas[0] * (interp_bottom - interp_top) + interp_top
return interp
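# Illustrative sketch (not part of the original module): querying the corners
# of a 2x2 grid reproduces the corner values, and the centre interpolates to
# their mean.
#
#   grid = tf.reshape(tf.constant([[1.0, 2.0], [3.0, 4.0]]), [1, 2, 2, 1])
#   queries = tf.constant([[[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]])
#   interpolate_bilinear(grid, queries)  # -> [[[1.0], [2.5], [4.0]]]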
def _get_dim(x, idx):
if x.shape.ndims is None:
return tf.shape(x)[idx]
return x.shape[idx] or tf.shape(x)[idx]
@tf.function
def dense_image_warp(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Image warping using per-pixel flow vectors.
Apply a non-linear warp to the image, where the warp is specified by a
dense flow field of offset vectors that define the correspondences of
pixel values in the output image back to locations in the source image.
Specifically, the pixel value at `output[b, j, i, c]` is
`images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c]`.
The locations specified by this formula do not necessarily map to an int
index. Therefore, the pixel value is obtained by bilinear
interpolation of the 4 nearest pixels around
`(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1])`. For locations outside
of the image, we use the nearest pixel values at the image boundary.
NOTE: The definition of the flow field above is different from that
of optical flow. This function expects the negative forward flow from
output image to source image. Given two images `I_1` and `I_2` and the
optical flow `F_12` from `I_1` to `I_2`, the image `I_1` can be
reconstructed by `I_1_rec = dense_image_warp(I_2, -F_12)`.
Args:
image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
name: A name for the operation (optional).
Note that image and flow can be of type `tf.half`, `tf.float32`, or
`tf.float64`, and do not necessarily have to be the same type.
Returns:
A 4-D float `Tensor` with shape`[batch, height, width, channels]`
and same type as input image.
Raises:
ValueError: if `height < 2` or `width < 2` or the inputs have the wrong
number of dimensions.
"""
with tf.name_scope(name or "dense_image_warp"):
image = tf.convert_to_tensor(image)
flow = tf.convert_to_tensor(flow)
batch_size, height, width, channels = (
_get_dim(image, 0),
_get_dim(image, 1),
_get_dim(image, 2),
_get_dim(image, 3),
)
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
grid_x, grid_y = tf.meshgrid(tf.range(width), tf.range(height))
stacked_grid = tf.cast(tf.stack([grid_y, grid_x], axis=2), flow.dtype)
batched_grid = tf.expand_dims(stacked_grid, axis=0)
query_points_on_grid = batched_grid - flow
query_points_flattened = tf.reshape(
query_points_on_grid, [batch_size, height * width, 2]
)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened)
interpolated = tf.reshape(interpolated, [batch_size, height, width, channels])
return interpolated
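# Illustrative sketch (not part of the original module): a zero flow field is
# the identity warp, per the formula in the docstring above.
#
#   image = tf.random.uniform([1, 4, 4, 3])
#   flow = tf.zeros([1, 4, 4, 2])
#   warped = dense_image_warp(image, flow)  # warped == image (up to float error)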
@tf.function(experimental_implements="addons:DenseImageWarp")
def dense_image_warp_annotated(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Similar to dense_image_warp but annotated with experimental_implements.
IMPORTANT: This is a temporary function and will be removed after TensorFlow's
next release.
    This annotation makes the serialized function detectable by the TFLite MLIR
    converter and allows the converter to map it to the corresponding TFLite op.
However, with the annotation, this function cannot be used with backprop
under `tf.GradientTape` objects.
"""
return dense_image_warp(image, flow, name)
| apache-2.0 | 6,193,261,979,639,195,000 | 41.202479 | 87 | 0.61784 | false |
Lana-B/Pheno4T | madanalysis/layout/histogram_core.py | 1 | 2479 | ################################################################################
#
# Copyright (C) 2012-2013 Eric Conte, Benjamin Fuks
# The MadAnalysis development team, email: <[email protected]>
#
# This file is part of MadAnalysis 5.
# Official website: <https://launchpad.net/madanalysis5>
#
# MadAnalysis 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MadAnalysis 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MadAnalysis 5. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
import logging
from math import sqrt
class HistogramCore:
def __init__(self):
import numpy
self.integral = 0
self.nevents = 0
self.nentries = 0
self.sumwentries = 0
self.sumw = 0
self.sumw2 = 0
self.sumwx = 0
self.sumw2x = 0
self.underflow = 0
self.overflow = 0
self.array = numpy.array([])
def ComputeIntegral(self):
self.integral = 0
for i in range(0,len(self.array)):
self.integral+=self.array[i]
self.integral += self.overflow
self.integral += self.underflow
def Print(self):
        logging.info('nevents='+str(self.nevents)+\
                     ' nentries='+str(self.nentries))
logging.info('sumw='+str(self.sumw)+\
' sumw2='+str(self.sumw2)+\
' sumwx='+str(self.sumwx)+\
' sumw2x='+str(self.sumw2x))
logging.info('underflow='+str(self.underflow)+\
' overflow='+str(self.overflow))
def GetMean(self):
if self.sumw==0:
return 0.
else:
return self.sumwx / self.sumw
def GetRMS(self):
if self.sumw==0:
return 0.
else:
mean = self.GetMean()
return sqrt(abs(self.sumw2x/self.sumw - mean*mean))
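# Illustrative sketch (not part of the original module): with the weighted
# sums filled by hand, GetMean() returns sumwx/sumw and GetRMS() returns
# sqrt(sumw2x/sumw - mean^2).
#
#   h = HistogramCore()
#   h.sumw, h.sumwx, h.sumw2x = 2., 6., 26. # two unit-weight fills: x=1, x=5
#   h.GetMean() # -> 3.0
#   h.GetRMS() # -> 2.0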
| gpl-3.0 | 4,464,193,754,576,342,000 | 27.494253 | 80 | 0.541751 | false |
Pandaaaa906/ChemErpSystem | ERP/templatetags/erp_extras.py | 1 | 1616 | # -*- coding: utf-8 -*-
from django import template
from django.db.models.query import QuerySet
import datetime
from django import template
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.db.models import Q
register = template.Library()
@register.inclusion_tag('sidebar_tree.html')
def children_tag(person):
if isinstance(person, QuerySet):
children = person
else:
children = person.children.all()
return {'children': children}
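# Illustrative template usage sketch (not part of the original module), with a
# hypothetical `person` variable in the template context:
#
#   {% load erp_extras %}
#   {% children_tag person %}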
@register.filter(name='has_group')
def has_group(user, group_name):
return user.groups.filter(name=group_name).exists()
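# Illustrative template usage sketch (not part of the original module):
#
#   {% if request.user|has_group:"managers" %} ... {% endif %}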
@register.filter(name='get_staff')
def get_staff(group_name):
group = Group.objects.get(name=group_name)
users = group.user_set.all()
    return [user.id for user in users]
@register.filter(name='get_nameFromId')
def get_nameFromId(usrId):
if usrId:
user = User.objects.get(id=usrId)
return user.first_name+user.last_name
else:
return ""
@register.filter
def get_range( value ):
"""
Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
Results with the HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views
"""
return range( value )
@register.filter
def get_fields(model):
return model._meta.get_fields() | apache-2.0 | 684,355,398,414,032,100 | 25.080645 | 66 | 0.667698 | false |
rplevka/robottelo | tests/upgrades/test_repository.py | 1 | 9545 | """Test for Repository related Upgrade Scenarios
:Requirement: Upgraded Satellite
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Repositories
:Assignee: tpapaioa
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import os
from fabric.api import execute
from fabric.api import run
from nailgun import entities
from upgrade.helpers.docker import docker_execute_command
from upgrade_tests import post_upgrade
from upgrade_tests import pre_upgrade
from upgrade_tests.helpers.scenarios import create_dict
from upgrade_tests.helpers.scenarios import dockerize
from upgrade_tests.helpers.scenarios import get_entity_data
from upgrade_tests.helpers.scenarios import rpm1
from upgrade_tests.helpers.scenarios import rpm2
from robottelo import ssh
from robottelo.api.utils import create_sync_custom_repo
from robottelo.api.utils import promote
from robottelo.config import settings
from robottelo.logging import logger
from robottelo.upgrade_utility import create_repo
from robottelo.upgrade_utility import host_location_update
from robottelo.upgrade_utility import install_or_update_package
from robottelo.upgrade_utility import publish_content_view
UPSTREAM_USERNAME = 'rTtest123'
DOCKER_VM = settings.upgrade.docker_vm
FILE_PATH = '/var/www/html/pub/custom_repo/'
CUSTOM_REPO = f'https://{settings.server.hostname}/pub/custom_repo'
_, RPM1_NAME = os.path.split(rpm1)
_, RPM2_NAME = os.path.split(rpm2)
class TestScenarioRepositoryUpstreamAuthorizationCheck:
"""This test scenario is to verify the upstream username in post-upgrade for a custom
repository which does have a upstream username but not password set on it in pre-upgrade.
Test Steps:
1. Before Satellite upgrade, Create a custom repository and sync it.
2. Set the upstream username on same repository using foreman-rake.
3. Upgrade Satellite.
4. Check if the upstream username value is removed for same repository.
"""
@pre_upgrade
def test_pre_repository_scenario_upstream_authorization(self):
"""Create a custom repository and set the upstream username on it.
:id: preupgrade-11c5ceee-bfe0-4ce9-8f7b-67a835baf522
:steps:
1. Create a custom repository and sync it.
2. Set the upstream username on same repository using foreman-rake.
:expectedresults:
            1. Upstream username should be set on the repository.
:BZ: 1641785
"""
org = entities.Organization().create()
custom_repo = create_sync_custom_repo(org_id=org.id)
rake_repo = f'repo = Katello::Repository.find_by_id({custom_repo})'
rake_username = f'; repo.root.upstream_username = "{UPSTREAM_USERNAME}"'
rake_repo_save = '; repo.save!(validate: false)'
result = run(f"echo '{rake_repo}{rake_username}{rake_repo_save}'|foreman-rake console")
assert 'true' in result
global_dict = {self.__class__.__name__: {'repo_id': custom_repo}}
create_dict(global_dict)
@post_upgrade(depend_on=test_pre_repository_scenario_upstream_authorization)
def test_post_repository_scenario_upstream_authorization(self):
"""Verify upstream username for pre-upgrade created repository.
:id: postupgrade-11c5ceee-bfe0-4ce9-8f7b-67a835baf522
:steps:
1. Verify upstream username for pre-upgrade created repository using
foreman-rake.
:expectedresults:
            1. The upstream username should no longer exist on the repository.
:BZ: 1641785
"""
repo_id = get_entity_data(self.__class__.__name__)['repo_id']
rake_repo = f'repo = Katello::RootRepository.find_by_id({repo_id})'
rake_username = '; repo.root.upstream_username'
result = run(f"echo '{rake_repo}{rake_username}'|foreman-rake console")
assert UPSTREAM_USERNAME not in result
class TestScenarioCustomRepoCheck:
"""Scenario test to verify if we can create a custom repository and consume it
via client then we alter the created custom repository and satellite will be able
to sync back the repo.
Test Steps:
1. Before Satellite upgrade.
2. Create new Organization and Location.
3. Create Product, custom repo, cv.
4. Create activation key and add subscription in it.
5. Create a content host, register and install package on it.
6. Upgrade Satellite.
7. Remove Old package and add new package into custom repo.
8. Sync repo, publish new version of cv.
9. Try to install new package on client.
BZ: 1429201,1698549
"""
@pre_upgrade
def test_pre_scenario_custom_repo_check(self):
"""This is pre-upgrade scenario test to verify if we can create a
custom repository and consume it via content host.
:id: preupgrade-eb6831b1-c5b6-4941-a325-994a09467478
:steps:
1. Before Satellite upgrade.
2. Create new Organization, Location.
3. Create Product, custom repo, cv.
4. Create activation key and add subscription.
5. Create a content host, register and install package on it.
:expectedresults:
1. Custom repo is created.
2. Package is installed on Content host.
"""
org = entities.Organization().create()
loc = entities.Location(organization=[org]).create()
lce = entities.LifecycleEnvironment(organization=org).create()
product = entities.Product(organization=org).create()
create_repo(rpm1, FILE_PATH)
repo = entities.Repository(product=product.id, url=CUSTOM_REPO).create()
repo.sync()
content_view = publish_content_view(org=org, repolist=repo)
promote(content_view.version[0], lce.id)
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/'
'Packages/b/|grep {}'.format(
org.label, lce.name, content_view.label, product.label, repo.label, RPM1_NAME
)
)
assert result.return_code == 0
assert len(result.stdout) >= 1
subscription = entities.Subscription(organization=org).search(
query={'search': f'name={product.name}'}
)[0]
ak = entities.ActivationKey(
content_view=content_view, organization=org.id, environment=lce
).create()
ak.add_subscriptions(data={'subscription_id': subscription.id})
rhel7_client = dockerize(ak_name=ak.name, distro='rhel7', org_label=org.label)
client_container_id = [value for value in rhel7_client.values()][0]
client_container_name = [key for key in rhel7_client.keys()][0]
host_location_update(
client_container_name=client_container_name, logger_obj=logger, loc=loc
)
status = execute(
docker_execute_command,
client_container_id,
'subscription-manager identity',
host=DOCKER_VM,
)[DOCKER_VM]
assert org.name in status
install_or_update_package(client_hostname=client_container_id, package=RPM1_NAME)
scenario_dict = {
self.__class__.__name__: {
'content_view_name': content_view.name,
'lce_id': lce.id,
'lce_name': lce.name,
'org_label': org.label,
'prod_label': product.label,
'rhel_client': rhel7_client,
'repo_name': repo.name,
}
}
create_dict(scenario_dict)
@post_upgrade(depend_on=test_pre_scenario_custom_repo_check)
def test_post_scenario_custom_repo_check(self):
"""This is post-upgrade scenario test to verify if we can alter the
created custom repository and satellite will be able to sync back
the repo.
:id: postupgrade-5c793577-e573-46a7-abbf-b6fd1f20b06e
:steps:
1. Remove old and add new package into custom repo.
2. Sync repo , publish the new version of cv.
3. Try to install new package on client.
        :expectedresults: Content host should be able to pull the new rpm.
"""
entity_data = get_entity_data(self.__class__.__name__)
client = entity_data.get('rhel_client')
client_container_id = list(client.values())[0]
content_view_name = entity_data.get('content_view_name')
lce_id = entity_data.get('lce_id')
lce_name = entity_data.get('lce_name')
org_label = entity_data.get('org_label')
prod_label = entity_data.get('prod_label')
repo_name = entity_data.get('repo_name')
create_repo(rpm2, FILE_PATH, post_upgrade=True, other_rpm=rpm1)
repo = entities.Repository(name=repo_name).search()[0]
repo.sync()
content_view = entities.ContentView(name=content_view_name).search()[0]
content_view.publish()
content_view = entities.ContentView(name=content_view_name).search()[0]
promote(content_view.version[-1], lce_id)
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/'
'Packages/c/| grep {}'.format(
org_label, lce_name, content_view.label, prod_label, repo.label, RPM2_NAME
)
)
assert result.return_code == 0
assert len(result.stdout) >= 1
install_or_update_package(client_hostname=client_container_id, package=RPM2_NAME)
| gpl-3.0 | 6,445,310,977,398,865,000 | 35.996124 | 95 | 0.651545 | false |
geopython/QGIS | tests/src/python/test_provider_memory.py | 1 | 27965 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the memory layer provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '2015-04-23'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (
QgsField,
QgsFields,
QgsLayerDefinition,
QgsPointXY,
QgsReadWriteContext,
QgsVectorLayer,
QgsFeatureRequest,
QgsFeature,
QgsGeometry,
QgsWkbTypes,
NULL,
QgsMemoryProviderUtils,
QgsCoordinateReferenceSystem,
QgsRectangle,
QgsTestUtils
)
from qgis.testing import (
start_app,
unittest
)
from utilities import (
unitTestDataPath,
compareWkt
)
from providertestbase import ProviderTestCase
from qgis.PyQt.QtCore import QVariant, QByteArray
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsMemoryProvider(unittest.TestCase, ProviderTestCase):
@classmethod
def createLayer(cls):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
assert (vl.isValid())
f1 = QgsFeature()
f1.setAttributes([5, -200, NULL, 'NuLl', '5'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature()
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3'])
f3 = QgsFeature()
f3.setAttributes([1, 100, 'Orange', 'oranGe', '1'])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature()
f4.setAttributes([2, 200, 'Apple', 'Apple', '2'])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature()
f5.setAttributes([4, 400, 'Honey', 'Honey', '4'])
f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])
return vl
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
cls.vl = cls.createLayer()
assert (cls.vl.isValid())
cls.source = cls.vl.dataProvider()
# poly layer
cls.poly_vl = QgsVectorLayer('Polygon?crs=epsg:4326&field=pk:integer&key=pk',
'test', 'memory')
assert (cls.poly_vl.isValid())
cls.poly_provider = cls.poly_vl.dataProvider()
f1 = QgsFeature()
f1.setAttributes([1])
f1.setGeometry(QgsGeometry.fromWkt('Polygon ((-69.03664108 81.35818902, -69.09237722 80.24346619, -73.718477 80.1319939, -73.718477 76.28620011, -74.88893598 76.34193625, -74.83319983 81.35818902, -69.03664108 81.35818902))'))
f2 = QgsFeature()
f2.setAttributes([2])
f2.setGeometry(QgsGeometry.fromWkt('Polygon ((-67.58750139 81.1909806, -66.30557012 81.24671674, -66.30557012 76.89929767, -67.58750139 76.89929767, -67.58750139 81.1909806))'))
f3 = QgsFeature()
f3.setAttributes([3])
f3.setGeometry(QgsGeometry.fromWkt('Polygon ((-68.36780737 75.78457483, -67.53176524 72.60761475, -68.64648808 73.66660144, -70.20710006 72.9420316, -68.36780737 75.78457483))'))
f4 = QgsFeature()
f4.setAttributes([4])
cls.poly_provider.addFeatures([f1, f2, f3, f4])
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def getEditableLayer(self):
return self.createLayer()
def testGetFeaturesSubsetAttributes2(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testGetFeaturesNoGeometry(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testCtors(self):
testVectors = ["Point", "LineString", "Polygon", "MultiPoint", "MultiLineString", "MultiPolygon", "None"]
for v in testVectors:
layer = QgsVectorLayer(v, "test", "memory")
assert layer.isValid(), "Failed to create valid %s memory layer" % (v)
def testLayerGeometry(self):
testVectors = [("Point", QgsWkbTypes.PointGeometry, QgsWkbTypes.Point),
("LineString", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineString),
("Polygon", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.Polygon),
("MultiPoint", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPoint),
("MultiLineString", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineString),
("MultiPolygon", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygon),
("PointZ", QgsWkbTypes.PointGeometry, QgsWkbTypes.PointZ),
("LineStringZ", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineStringZ),
("PolygonZ", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.PolygonZ),
("MultiPointZ", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPointZ),
("MultiLineStringZ", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineStringZ),
("MultiPolygonZ", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygonZ),
("PointM", QgsWkbTypes.PointGeometry, QgsWkbTypes.PointM),
("LineStringM", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineStringM),
("PolygonM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.PolygonM),
("MultiPointM", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPointM),
("MultiLineStringM", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineStringM),
("MultiPolygonM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygonM),
("PointZM", QgsWkbTypes.PointGeometry, QgsWkbTypes.PointZM),
("LineStringZM", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineStringZM),
("PolygonZM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.PolygonZM),
("MultiPointZM", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPointZM),
("MultiLineStringZM", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineStringZM),
("MultiPolygonZM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygonZM),
("Point25D", QgsWkbTypes.PointGeometry, QgsWkbTypes.Point25D),
("LineString25D", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineString25D),
("Polygon25D", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.Polygon25D),
("MultiPoint25D", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPoint25D),
("MultiLineString25D", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineString25D),
("MultiPolygon25D", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygon25D),
("None", QgsWkbTypes.NullGeometry, QgsWkbTypes.NoGeometry)]
for v in testVectors:
layer = QgsVectorLayer(v[0], "test", "memory")
myMessage = ('Expected: %s\nGot: %s\n' %
(v[1], layer.geometryType()))
assert layer.geometryType() == v[1], myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(v[2], layer.wkbType()))
assert layer.wkbType() == v[2], myMessage
def testAddFeatures(self):
layer = QgsVectorLayer("Point", "test", "memory")
provider = layer.dataProvider()
res = provider.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)])
assert res, "Failed to add attributes"
myMessage = ('Expected: %s\nGot: %s\n' %
(3, len(provider.fields())))
assert len(provider.fields()) == 3, myMessage
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(["Johny",
20,
0.3])
res, t = provider.addFeatures([ft])
assert res, "Failed to add feature"
myMessage = ('Expected: %s\nGot: %s\n' %
(1, provider.featureCount()))
assert provider.featureCount() == 1, myMessage
for f in provider.getFeatures(QgsFeatureRequest()):
myMessage = ('Expected: %s\nGot: %s\n' %
("Johny", f[0]))
assert f[0] == "Johny", myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(20, f[1]))
assert f[1] == 20, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(0.3, f[2]))
assert (f[2] - 0.3) < 0.0000001, myMessage
geom = f.geometry()
myMessage = ('Expected: %s\nGot: %s\n' %
("Point (10 10)", str(geom.asWkt())))
assert compareWkt(str(geom.asWkt()), "Point (10 10)"), myMessage
def testGetFields(self):
layer = QgsVectorLayer("Point", "test", "memory")
provider = layer.dataProvider()
provider.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)])
myMessage = ('Expected: %s\nGot: %s\n' %
(3, len(provider.fields())))
assert len(provider.fields()) == 3, myMessage
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(["Johny",
20,
0.3])
provider.addFeatures([ft])
for f in provider.getFeatures(QgsFeatureRequest()):
myMessage = ('Expected: %s\nGot: %s\n' %
("Johny", f['name']))
self.assertEqual(f["name"], "Johny", myMessage)
def testFromUri(self):
"""Test we can construct the mem provider from a uri"""
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
assert myMemoryLayer is not None, 'Provider not initialized'
myProvider = myMemoryLayer.dataProvider()
assert myProvider is not None
def testLengthPrecisionFromUri(self):
"""Test we can assign length and precision from a uri"""
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=size:double(12,9)&index=yes'),
'test',
'memory')
self.assertEqual(myMemoryLayer.fields().field('size').length(), 12)
self.assertEqual(myMemoryLayer.fields().field('size').precision(), 9)
def testFromUriWithEncodedField(self):
"""Test we can construct the mem provider from a uri when a field name is encoded"""
layer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=test%2Ffield:integer'),
'test',
'memory')
self.assertTrue(layer.isValid())
self.assertEqual([f.name() for f in layer.fields()], ['name', 'test/field'])
def testSaveFields(self):
# Create a new memory layer with no fields
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&index=yes'),
'test',
'memory')
# Add some fields to the layer
myFields = [QgsField('TestInt', QVariant.Int, 'integer', 2, 0),
QgsField('TestLong', QVariant.LongLong, 'long', -1, 0),
QgsField('TestDbl', QVariant.Double, 'double', 8, 6),
QgsField('TestString', QVariant.String, 'string', 50, 0),
QgsField('TestDate', QVariant.Date, 'date'),
QgsField('TestTime', QVariant.Time, 'time'),
QgsField('TestDateTime', QVariant.DateTime, 'datetime')]
assert myMemoryLayer.startEditing()
for f in myFields:
assert myMemoryLayer.addAttribute(f)
assert myMemoryLayer.commitChanges()
myMemoryLayer.updateFields()
# Export the layer to a layer-definition-XML
qlr = QgsLayerDefinition.exportLayerDefinitionLayers([myMemoryLayer], QgsReadWriteContext())
assert qlr is not None
# Import the layer from the layer-definition-XML
layers = QgsLayerDefinition.loadLayerDefinitionLayers(qlr, QgsReadWriteContext())
assert layers is not None
myImportedLayer = layers[0]
assert myImportedLayer is not None
# Check for the presence of the fields
importedFields = myImportedLayer.fields()
assert importedFields is not None
for f in myFields:
assert f == importedFields.field(f.name())
def testRenameAttributes(self):
layer = QgsVectorLayer("Point", "test", "memory")
provider = layer.dataProvider()
res = provider.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)])
layer.updateFields()
assert res, "Failed to add attributes"
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(["Johny",
20,
0.3])
res, t = provider.addFeatures([ft])
# bad rename
self.assertFalse(provider.renameAttributes({-1: 'not_a_field'}))
self.assertFalse(provider.renameAttributes({100: 'not_a_field'}))
# already exists
self.assertFalse(provider.renameAttributes({1: 'name'}))
# rename one field
self.assertTrue(provider.renameAttributes({1: 'this_is_the_new_age'}))
self.assertEqual(provider.fields().at(1).name(), 'this_is_the_new_age')
layer.updateFields()
fet = next(layer.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'this_is_the_new_age')
# rename two fields
self.assertTrue(provider.renameAttributes({1: 'mapinfo_is_the_stone_age', 2: 'super_size'}))
self.assertEqual(provider.fields().at(1).name(), 'mapinfo_is_the_stone_age')
self.assertEqual(provider.fields().at(2).name(), 'super_size')
layer.updateFields()
fet = next(layer.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'mapinfo_is_the_stone_age')
self.assertEqual(fet.fields()[2].name(), 'super_size')
def testUniqueSource(self):
"""
Similar memory layers should have unique source - some code checks layer source to identify
matching layers
"""
layer = QgsVectorLayer("Point", "test", "memory")
layer2 = QgsVectorLayer("Point", "test2", "memory")
self.assertNotEqual(layer.source(), layer2.source())
def testCreateMemoryLayer(self):
"""
Test QgsMemoryProviderUtils.createMemoryLayer()
"""
# no fields
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields())
self.assertTrue(layer.isValid())
self.assertEqual(layer.name(), 'my name')
self.assertTrue(layer.fields().isEmpty())
# similar layers should have unique sources
layer2 = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields())
self.assertNotEqual(layer.source(), layer2.source())
# geometry type
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields(), QgsWkbTypes.Point)
self.assertTrue(layer.isValid())
self.assertEqual(layer.wkbType(), QgsWkbTypes.Point)
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields(), QgsWkbTypes.PolygonZM)
self.assertTrue(layer.isValid())
self.assertEqual(layer.wkbType(), QgsWkbTypes.PolygonZM)
# crs
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields(), QgsWkbTypes.PolygonZM, QgsCoordinateReferenceSystem.fromEpsgId(3111))
self.assertTrue(layer.isValid())
self.assertEqual(layer.wkbType(), QgsWkbTypes.PolygonZM)
self.assertTrue(layer.crs().isValid())
self.assertEqual(layer.crs().authid(), 'EPSG:3111')
# fields
fields = QgsFields()
fields.append(QgsField("string", QVariant.String))
fields.append(QgsField("long", QVariant.LongLong))
fields.append(QgsField("double", QVariant.Double))
fields.append(QgsField("integer", QVariant.Int))
fields.append(QgsField("date", QVariant.Date))
fields.append(QgsField("datetime", QVariant.DateTime))
fields.append(QgsField("time", QVariant.Time))
fields.append(QgsField("#complex_name", QVariant.String))
fields.append(QgsField("complex/name", QVariant.String))
fields.append(QgsField("binaryfield", QVariant.ByteArray))
fields.append(QgsField("boolfield", QVariant.Bool))
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', fields)
self.assertTrue(layer.isValid())
self.assertFalse(layer.fields().isEmpty())
self.assertEqual(len(layer.fields()), len(fields))
for i in range(len(fields)):
self.assertEqual(layer.fields()[i].name(), fields[i].name())
self.assertEqual(layer.fields()[i].type(), fields[i].type())
# unsupported field type
fields = QgsFields()
fields.append(QgsField("rect", QVariant.RectF))
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', fields)
self.assertTrue(layer.isValid())
self.assertFalse(layer.fields().isEmpty())
self.assertEqual(layer.fields()[0].name(), 'rect')
self.assertEqual(layer.fields()[0].type(), QVariant.String) # should be mapped to string
def testThreadSafetyWithIndex(self):
layer = QgsVectorLayer('Point?crs=epsg:4326&index=yes&field=pk:integer&field=cnt:int8&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
provider = layer.dataProvider()
f = QgsFeature()
f.setAttributes([5, -200, NULL, 'NuLl', '5'])
f.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
for i in range(100000):
provider.addFeatures([f])
# filter rect request
extent = QgsRectangle(-73, 70, -63, 80)
request = QgsFeatureRequest().setFilterRect(extent)
self.assertTrue(QgsTestUtils.testProviderIteratorThreadSafety(self.source, request))
def testMinMaxCache(self):
"""
Test that min/max cache is appropriately cleared
:return:
"""
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:integer',
'test', 'memory')
self.assertTrue(vl.isValid())
f1 = QgsFeature()
f1.setAttributes([5, -200])
f2 = QgsFeature()
f2.setAttributes([3, 300])
f3 = QgsFeature()
f3.setAttributes([1, 100])
f4 = QgsFeature()
f4.setAttributes([2, 200])
f5 = QgsFeature()
f5.setAttributes([4, 400])
res, [f1, f2, f3, f4, f5] = vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 5)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# add feature
f6 = QgsFeature()
f6.setAttributes([15, 1400])
res, [f6] = vl.dataProvider().addFeatures([f6])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 15)
self.assertEqual(vl.dataProvider().maximumValue(1), 1400)
f7 = QgsFeature()
f7.setAttributes([-1, -1400])
res, [f7] = vl.dataProvider().addFeatures([f7])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), -1)
self.assertEqual(vl.dataProvider().minimumValue(1), -1400)
self.assertEqual(vl.dataProvider().maximumValue(0), 15)
self.assertEqual(vl.dataProvider().maximumValue(1), 1400)
# change attribute values
self.assertTrue(vl.dataProvider().changeAttributeValues({f6.id(): {0: 3, 1: 150}, f7.id(): {0: 4, 1: -100}}))
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 5)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# delete features
self.assertTrue(vl.dataProvider().deleteFeatures([f4.id(), f1.id()]))
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -100)
self.assertEqual(vl.dataProvider().maximumValue(0), 4)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# delete attributes
self.assertTrue(vl.dataProvider().deleteAttributes([0]))
self.assertEqual(vl.dataProvider().minimumValue(0), -100)
self.assertEqual(vl.dataProvider().maximumValue(0), 400)
def testBinary(self):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:binary',
'test', 'memory')
self.assertTrue(vl.isValid())
dp = vl.dataProvider()
fields = dp.fields()
self.assertEqual([f.name() for f in fields], ['f1', 'f2'])
self.assertEqual([f.type() for f in fields], [QVariant.Int, QVariant.ByteArray])
self.assertEqual([f.typeName() for f in fields], ['integer', 'binary'])
f = QgsFeature(dp.fields())
bin_1 = b'xxx'
bin_val1 = QByteArray(bin_1)
f.setAttributes([1, bin_val1])
self.assertTrue(dp.addFeature(f))
f2 = [f for f in dp.getFeatures()][0]
self.assertEqual(f2.attributes(), [1, bin_val1])
# add binary field
self.assertTrue(dp.addAttributes([QgsField('binfield2', QVariant.ByteArray, 'Binary')]))
fields = dp.fields()
bin2_field = fields[fields.lookupField('binfield2')]
self.assertEqual(bin2_field.type(), QVariant.ByteArray)
self.assertEqual(bin2_field.typeName(), 'Binary')
f = QgsFeature(fields)
bin_2 = b'yyy'
bin_val2 = QByteArray(bin_2)
f.setAttributes([2, NULL, bin_val2])
self.assertTrue(dp.addFeature(f))
f1 = [f for f in dp.getFeatures()][0]
self.assertEqual(f1.attributes(), [1, bin_val1, NULL])
f2 = [f for f in dp.getFeatures()][1]
self.assertEqual(f2.attributes(), [2, NULL, bin_val2])
def testBool(self):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:bool',
'test', 'memory')
self.assertTrue(vl.isValid())
dp = vl.dataProvider()
fields = dp.fields()
self.assertEqual([f.name() for f in fields], ['f1', 'f2'])
self.assertEqual([f.type() for f in fields], [QVariant.Int, QVariant.Bool])
self.assertEqual([f.typeName() for f in fields], ['integer', 'boolean'])
f = QgsFeature(dp.fields())
f.setAttributes([1, True])
f2 = QgsFeature(dp.fields())
f2.setAttributes([2, False])
f3 = QgsFeature(dp.fields())
f3.setAttributes([3, NULL])
self.assertTrue(dp.addFeatures([f, f2, f3]))
self.assertEqual([f.attributes() for f in dp.getFeatures()], [[1, True], [2, False], [3, NULL]])
# add boolean field
self.assertTrue(dp.addAttributes([QgsField('boolfield2', QVariant.Bool, 'Boolean')]))
fields = dp.fields()
bool2_field = fields[fields.lookupField('boolfield2')]
self.assertEqual(bool2_field.type(), QVariant.Bool)
self.assertEqual(bool2_field.typeName(), 'Boolean')
f = QgsFeature(fields)
f.setAttributes([2, NULL, True])
self.assertTrue(dp.addFeature(f))
self.assertEqual([f.attributes() for f in dp.getFeatures()], [[1, True, NULL], [2, False, NULL], [3, NULL, NULL], [2, NULL, True]])
class TestPyQgsMemoryProviderIndexed(unittest.TestCase, ProviderTestCase):
"""Runs the provider test suite against an indexed memory layer"""
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
cls.vl = QgsVectorLayer('Point?crs=epsg:4326&index=yes&field=pk:integer&field=cnt:int8&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
assert (cls.vl.isValid())
cls.source = cls.vl.dataProvider()
f1 = QgsFeature()
f1.setAttributes([5, -200, NULL, 'NuLl', '5'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature()
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3'])
f3 = QgsFeature()
f3.setAttributes([1, 100, 'Orange', 'oranGe', '1'])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature()
f4.setAttributes([2, 200, 'Apple', 'Apple', '2'])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature()
f5.setAttributes([4, 400, 'Honey', 'Honey', '4'])
f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
cls.source.addFeatures([f1, f2, f3, f4, f5])
# poly layer
cls.poly_vl = QgsVectorLayer('Polygon?crs=epsg:4326&index=yes&field=pk:integer&key=pk',
'test', 'memory')
assert (cls.poly_vl.isValid())
cls.poly_provider = cls.poly_vl.dataProvider()
f1 = QgsFeature()
f1.setAttributes([1])
f1.setGeometry(QgsGeometry.fromWkt('Polygon ((-69.0 81.4, -69.0 80.2, -73.7 80.2, -73.7 76.3, -74.9 76.3, -74.9 81.4, -69.0 81.4))'))
f2 = QgsFeature()
f2.setAttributes([2])
f2.setGeometry(QgsGeometry.fromWkt('Polygon ((-67.6 81.2, -66.3 81.2, -66.3 76.9, -67.6 76.9, -67.6 81.2))'))
f3 = QgsFeature()
f3.setAttributes([3])
f3.setGeometry(QgsGeometry.fromWkt('Polygon ((-68.4 75.8, -67.5 72.6, -68.6 73.7, -70.2 72.9, -68.4 75.8))'))
f4 = QgsFeature()
f4.setAttributes([4])
cls.poly_provider.addFeatures([f1, f2, f3, f4])
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def testGetFeaturesSubsetAttributes2(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testGetFeaturesNoGeometry(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -186,420,792,356,683,260 | 40.989489 | 234 | 0.604613 | false |
YevgeniyaK/python_training | fixture/group.py | 1 | 4957 | from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def return_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
'''
    Form for creating a new group
'''
def create(self, group):
wd = self.app.wd
self.open_group_page()
# init group greation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_group_page()
self.group_cache = None
'''
    Delete the first group
'''
def delete_first_group(self):
self.delete_group_by_index(0)
'''
    Delete a random group
'''
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
#submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
'''
    Modify a group
'''
def change_group(self, group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("edit").click()
self.fill_group_form(group)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
    Modify the first group
'''
def modify_first_group(self):
self.modify_group_by_index(0)
'''
    Modify a random group
'''
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
fill group form
'''
def fill_group_form(self, group):
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
'''
    Separate method for selecting the first group
'''
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
'''
    Method for selecting a random group
'''
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
'''
    Check for the existence of groups
'''
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
'''
    Get the list of groups
'''
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name = text.strip(), id = id))
return list(self.group_cache)
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def modify_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
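# Hedged usage sketch (assumes the fixture wiring used by this test project:
# `app` exposes a selenium WebDriver as app.wd, and Group accepts keyword fields):
#   helper = GroupHelper(app)
#   helper.create(Group(name="test", header="hdr", footer="ftr"))
#   helper.modify_group_by_index(0, Group(name="renamed"))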
| apache-2.0 | -8,982,111,332,978,443,000 | 26.114943 | 100 | 0.569097 | false |
patcorwin/fossil | pdil/tool/fossil/rigging/splineTwist.py | 1 | 24347 | from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import math
from pymel.core import curve, cluster, delete, dt, duplicate, expression, group, hide, ikHandle, insertKnotCurve, joint, move, orientConstraint, parent, parentConstraint, pointConstraint, xform
from ....add import simpleName, shortName
from .... import core
from .... import nodeApi
from .. import controllerShape
from .. import space
from ..cardRigging import MetaControl, ParamInfo
from . import _util as util
from .. import node
class OrientMode:
CLOSEST_JOINT = 'closest_joint'
WORLD = 'world'
AS_FIRST_JOINT = 'as_first_joint'
class TwistStyle:
'''
    Used by splineIk. Advanced uses advanced twist while the others determine
which rotation axis drives the twist attribute.
'''
ADVANCED = 'Advanced'
X = 'X'
NEG_X = '-X'
Y = 'Y'
NEG_Y = '-Y'
Z = 'Z'
NEG_Z = '-Z'
@classmethod
def asChoices(cls):
choices = OrderedDict()
choices[cls.ADVANCED] = cls.ADVANCED
choices[cls.X] = cls.X
choices[cls.NEG_X] = cls.NEG_X
choices[cls.Y] = cls.Y
choices[cls.NEG_Y] = cls.NEG_Y
choices[cls.Z] = cls.Z
choices[cls.NEG_Z] = cls.NEG_Z
return choices
@util.adds('twist', 'stretch')
@util.defaultspec( {'shape': 'sphere', 'size': 10, 'color': 'blue 0.22'} )
def buildSplineTwist(start, end, controlCountOrCrv=4, twistInfDist=0, simplifyCurve=True,
tipBend=True, sourceBend=True, matchOrient=True, allowOffset=True, # noqa e128
useLeadOrient=False, # This is an backwards compatible option, mutually exclusive with matchOrient
twistStyle=TwistStyle.ADVANCED, duplicateCurve=True,
controlOrient=OrientMode.CLOSEST_JOINT,
name='', groupName='', controlSpec={}):
'''
Make a spline controller from `start` to `end`.
    :param int twistInfDist: Default twist controls to fall off before hitting each other.
Otherwise it is the number of joints on either side it will influence.
:param bool simplifyCurve: Only used if # of cvs is specified. Turning it
    on will likely result in the curve not matching the existing joint positions
but will be more evenly spaced per control.
:param bool tipBend: If True, an extra cv will be added at the second to
last joint, controlled by the last controller to ease out.
##:param bool applyDirectly: If True, rig the given joints, do not make a duplicate chain
:param bool useLeadOrient: If True, the controllers will be aligned the same
as the first joint.
**NOTE** I think this option only exists to preserve previous builds, this is pretty dumb
:param bool matchOrient: Does trueZero on the start and end. I'm not sure this makes sense.
.. todo::
* Add the same spline chain +X towards child that the neck has and test out advancedTwist()
* See if I can identify the closest joint to a control and orient to that
* The first joint has parent AND local, which are the same thing, keep this for convenience of selecting all the controls and editing attrs?
* Test specifying your own curve
* There is a float division error that can happen if there are too many control cvs.
* Verify twists work right with unsimplified curves (hint, I don't think they do).
'''
matchOrient = False
useLeadOrient = False
if isinstance( controlCountOrCrv, int ):
assert controlCountOrCrv > 3, "controlCount must be at least 4"
# The axis to twist and stretch on.
jointAxis = util.identifyAxis( start.listRelatives(type='joint')[0] )
# Make a duplicate chain for the IK that will also stretch.
stretchingChain = util.dupChain( start, end, '{0}_stretch' )
# &&& NOTE! This might affect advanced twist in some way.
# If the chain is mirrored, we need to reorient to point down x so the
# spline doesn't mess up when the main control rotates
if stretchingChain[1].tx.get() < 0:
        # Despite aggressive zeroing of the source, the dup can still end up slightly
# off zero so force it.
for jnt in stretchingChain:
jnt.r.set(0, 0, 0)
joint( stretchingChain[0], e=True, oj='xyz', secondaryAxisOrient='yup', zso=True, ch=True)
joint( stretchingChain[-1], e=True, oj='none')
if isinstance( controlCountOrCrv, int ):
mainIk, _effector, crv = ikHandle( sol='ikSplineSolver',
sj=stretchingChain[0],
ee=stretchingChain[-1],
ns=controlCountOrCrv - 3,
simplifyCurve=simplifyCurve)
else:
if duplicateCurve:
crv = duplicate(controlCountOrCrv)[0]
else:
crv = controlCountOrCrv
mainIk, _effector = ikHandle( sol='ikSplineSolver',
sj=stretchingChain[0],
ee=stretchingChain[-1],
ccv=False,
pcv=False)
crv.getShape().worldSpace[0] >> mainIk.inCurve
hide(mainIk)
mainIk.rename( simpleName(start, "{0}_ikHandle") )
crv.rename( simpleName(start, "{0}_curve") )
if not name:
name = util.trimName(start)
if name.count(' '):
name, endName = name.split()
else:
endName = ''
# Only add a tipBend cv if number of cvs was specified.
if tipBend and isinstance( controlCountOrCrv, int ):
currentTrans = [ xform(cv, q=True, ws=True, t=True) for cv in crv.cv ]
insertKnotCurve( crv.u[1], nk=1, add=False, ib=False, rpo=True, cos=True, ch=True)
for pos, cv in zip(currentTrans, crv.cv[:-2]):
xform( cv, ws=True, t=pos )
xform( crv.cv[-2], ws=True, t=xform(end.getParent(), q=True, ws=True, t=True) )
xform( crv.cv[-1], ws=True, t=currentTrans[-1] )
# Only add a sourceBend cv if number of cvs was specified.
if sourceBend and isinstance( controlCountOrCrv, int ):
currentTrans = [ xform(cv, q=True, ws=True, t=True) for cv in crv.cv ]
insertKnotCurve( crv.u[1.2], nk=1, add=False, ib=False, rpo=True, cos=True, ch=True) # I honestly don't know why, but 1.2 must be different than 1.0
for pos, cv in zip(currentTrans[1:], crv.cv[2:]):
xform( cv, ws=True, t=pos )
xform( crv.cv[0], ws=True, t=currentTrans[0] )
xform( crv.cv[1], ws=True, t=xform(stretchingChain[1], q=True, ws=True, t=True) )
grp = group(em=True, p=node.mainGroup(), n=start.name() + "_splineTwist")
controls = util.addControlsToCurve(name + 'Ctrl', crv, controlSpec['main'])
for ctrl in controls:
core.dagObj.zero(ctrl).setParent( grp )
if controlOrient == OrientMode.CLOSEST_JOINT:
# Use the real chain to match orientations since the stretching chain might reorient to compensate for mirroring.
jointPos = {j: dt.Vector(xform(j, q=True, ws=True, t=True)) for j in util.getChain(start, end)}
aveSpacing = util.chainLength(stretchingChain) / (len(stretchingChain) - 1)
for ctrl in controls:
cpos = dt.Vector(xform(ctrl, q=True, ws=True, t=True))
distances = [ ( (jpos - cpos).length() / aveSpacing, j) for j, jpos in jointPos.items() ]
distances.sort()
''' Just use the closest joint if within 10% of the average spacing
Possible future improvement, look at two joints, and determine if
the control is between them and inbetween the orientation.
'''
if True: # distances[0][0] < 100:
r = xform(distances[0][1], q=True, ro=True, ws=True)
with core.dagObj.Solo(ctrl):
xform(ctrl, ro=r, ws=True)
core.dagObj.zero(ctrl)
"""
# Otherwise split the distances by the percentages
else:
#m1 = xform(distances[0][1], q=True, m=True, ws=True)
#m2 = xform(distances[1][1], q=True, m=True, ws=True)
distA, jointA = distances[0]
distB, jointB = distances[1]
x, y, z = midOrient2(jointA, jointB)
matrix = list(x) + [0] + list(y) + [0] + list(z) + [0] + xform(ctrl, q=True, ws=True, t=True) + [1.0]
print( ctrl, 'to', jointA, jointB )
with Solo(ctrl):
xform(ctrl, ws=True, m=matrix)
# Need to improve my matrix skills, for now it's easy enough to just rotate it
#rotate(ctrl, [0, 180, 0], os=1, r=1)
core.dagObj.zero(ctrl)
"""
if endName:
controls[-1].rename(endName + 'Ctrl')
if matchOrient:
util.trueZeroSetup(start, controls[0])
util.trueZeroSetup(end, controls[-1])
if tipBend:
if useLeadOrient and not matchOrient:
controls[-1].setRotation( end.getRotation(space='world'), space='world' )
parent( controls[-2].getChildren(), controls[-1] )
name = controls[-2].name()
delete( core.dagObj.zero(controls[-2]) )
if not endName:
controls[-1].rename(name)
controls[-2] = controls[-1]
controls.pop()
#core.dagObj.zero(controls[-2]).setParent(controls[-1])
#channels = [t + a for t in 'trs' for a in 'xyz']
#for channel in channels:
# controls[-2].attr( channel ).setKeyable(False)
# controls[-2].attr( channel ).lock()
if sourceBend:
names = []
for ctrl in controls[1:-1]:
names.append( ctrl.name() )
ctrl.rename( '__temp' )
endNum = -1 if endName else None
for name, cur in zip(names, controls[2:endNum] ):
cur.rename(name)
if useLeadOrient and not matchOrient:
controls[0].setRotation( start.getRotation(space='world'), space='world' )
parent( controls[1].getChildren(), controls[0] )
delete( core.dagObj.zero(controls[1]) )
del controls[1]
controls[0] = nodeApi.RigController.convert(controls[0])
controls[0].container = grp
stretchAttr, jointLenMultiplier = util.makeStretchySpline(controls[0], mainIk)
connectingCurve = addConnectingCurve(controls)
controls[0].visibility >> connectingCurve.visibility
# Make twist for everything but hide them all and drive the ones that overlap
# with spline controllers by the spline control.
if not twistInfDist:
numJoints = countJoints(start, end)
        twistInfDist = int(math.ceil((numJoints - len(controls)) / float(len(controls) - 1)))
twistInfDist = max(1, twistInfDist)
noInherit = group(em=True, p=grp, n='NoInheritTransform')
core.dagObj.lockTrans(noInherit)
core.dagObj.lockRot(noInherit)
core.dagObj.lockScale(noInherit)
noInherit.inheritsTransform.set(False)
noInherit.inheritsTransform.lock()
# &&& If simplify curve is ON, the last joint gets constrained to the spinner?
# Otherwise it gets constrained to the offset or stretch joint, which I think is correct.
if allowOffset:
# If allowOffset, make another chain to handle the difference in joint positions.
offsetChain = util.dupChain( start, end, '{0}_offset' )
offsetChain[0].setParent(noInherit)
hide(offsetChain[0])
twists, constraints = addTwistControls( offsetChain, start, end, twistInfDist)
finalRigJoint = offsetChain[-1]
else:
twists, constraints = addTwistControls( stretchingChain, start, end, twistInfDist )
finalRigJoint = stretchingChain[-1]
# Constrain the end to the last controller so it doesn't pop off at all,
# but still respect the stretch attr.
pointConstraint(finalRigJoint, end, e=True, rm=True)
# Make a proxy that can allows respecting stretch being active or not.
endProxy = duplicate(end, po=True)[0]
endProxy.rename('endProxy')
hide(endProxy)
endProxy.setParent(grp)
stretchAttr >> core.constraints.pointConst( controls[-1], endProxy, mo=True )
core.math.opposite(stretchAttr) >> core.constraints.pointConst( finalRigJoint, endProxy )
constraints.point >> core.constraints.pointConst( endProxy, end )
hide(twists)
numControls = len(controls)
numTwists = len(twists)
for i, ctrl in enumerate(controls):
index = int(round( i * ((numTwists - 1) / (numControls - 1)) ))
util.drive( ctrl, 'twist', twists[index].attr('r' + jointAxis) )
space.add( ctrl, start.getParent(), 'local' )
parents = [start.getParent()] + controls[:-1]
stretchingChain[0].setParent(noInherit)
crv.setParent(noInherit)
hide(crv, stretchingChain[0])
connectingCurve.setParent( noInherit )
mainIk.setParent(grp)
# Do not want to scale but let rotate for "fk-like" space mode
for ctrl, _parent in zip(controls, parents):
core.dagObj.lockScale( ctrl )
if useLeadOrient:
ctrl.setRotation( start.getRotation(space='world'), space='world' )
core.dagObj.zero(ctrl)
space.addMain(ctrl)
space.add( ctrl, _parent, 'parent')
for i, ctrl in enumerate(controls[1:]):
controls[0].subControl[str(i)] = ctrl
# Must constrain AFTER controls (possibly) get orientd
orientConstraint( controls[-1], finalRigJoint, mo=True )
# Setup advanced twist
if twistStyle == TwistStyle.ADVANCED:
        # &&& Test using advancedTwist() to replace the code below
util.advancedTwist(stretchingChain[0], stretchingChain[1], controls[0], controls[-1], mainIk)
'''
startAxis = duplicate( start, po=True )[0]
startAxis.rename( 'startAxis' )
startAxis.setParent( controls[0] )
endAxis = duplicate( start, po=True )[0]
endAxis.rename( 'endAxis' )
endAxis.setParent( controls[-1] )
endAxis.t.set(0, 0, 0)
mainIk.dTwistControlEnable.set(1)
mainIk.dWorldUpType.set(4)
startAxis.worldMatrix[0] >> mainIk.dWorldUpMatrix
endAxis.worldMatrix[0] >> mainIk.dWorldUpMatrixEnd
hide(startAxis, endAxis)
'''
else:
if twistStyle == TwistStyle.X:
controls[-1].rx >> mainIk.twist
elif twistStyle == TwistStyle.NEG_X:
core.math.multiply(controls[-1].rx, -1.0) >> mainIk.twist
elif twistStyle == TwistStyle.Y:
controls[-1].ry >> mainIk.twist
elif twistStyle == TwistStyle.NEG_Y:
core.math.multiply(controls[-1].ry, -1.0) >> mainIk.twist
elif twistStyle == TwistStyle.Z:
controls[-1].rz >> mainIk.twist
elif twistStyle == TwistStyle.NEG_Z:
core.math.multiply(controls[-1].rz, -1.0) >> mainIk.twist
# To make .twist work, the chain needs to follow parent joint
follow = group(em=True, p=grp)
target = start.getParent()
core.dagObj.matchTo(follow, stretchingChain[0])
parentConstraint( target, follow, mo=1 )
follow.rename(target + '_follow')
stretchingChain[0].setParent(follow)
# Constraint the offset (if exists) to the stretch last to account for any adjustments.
if allowOffset:
util.constrainAtoB(offsetChain[:-1], stretchingChain[:-1])
pointConstraint(stretchingChain[-1], offsetChain[-1], mo=True)
return controls[0], constraints
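# Hedged usage sketch (joint names are hypothetical; assumes a Maya scene with a
# 'spine01'..'spine05' joint chain and pymel imported as pm):
#   ctrl, constraints = buildSplineTwist(pm.PyNode('spine01'), pm.PyNode('spine05'),
#                                        controlCountOrCrv=4,
#                                        twistStyle=TwistStyle.ADVANCED)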
def addTwistControls(controlChain, boundChain, boundEnd, influenceDist=3):
'''
Put a rotation controller under each child of the controlChain to drive .rz
of the boundChain. They must both be the same size.
:param Joint controlChain: The first joint of the controlling rig (ideally pruned)
:param Joint boundChain: The first joint of joints being controlled by the spline.
:param Joint boundEnd: The last joint in the bound chain, used to address possible branching.
:param int influenceDist: How many adjacent joints are influenced (total #
is 2x since it influences both directions).
'''
obj = controlChain[0]
target = boundChain
#controlJoints = getChain( controlChain, findChild(controlChain, shortName(boundEnd)) )
controlJoints = controlChain
boundJoints = util.getChain( boundChain, util.findChild(boundChain, shortName(boundEnd)) )
    assert len(controlJoints) == len(boundJoints), "Failure when adding twist controls, somehow the chains don't match length, controls {0} != {1}".format( len(controlJoints), len(boundJoints) )
controls = []
groups = []
pointConstraints = []
orientConstraints = []
for i, (obj, target) in enumerate(zip(controlJoints, boundJoints)):
c = controllerShape.simpleCircle()
c.setParent(obj)
c.t.set(0, 0, 0)
c.r.set(0, 0, 0)
controls.append(c)
spinner = group(em=True, name='spinner%i' % i, p=target)
spinner.r.set(0, 0, 0)
spinner.setParent(obj)
spinner.t.set(0, 0, 0)
# Aligning the spinners to the bound joint means we don't have to offset
# the orientConstraint which means nicer numbers.
# spinner.setRotation( target.getRotation(space='world'), space='world' )
groups.append(spinner)
pointConstraints.append( core.constraints.pointConst( obj, target, maintainOffset=False ) )
orientConstraints.append( core.constraints.orientConst( spinner, target, maintainOffset=False ) )
children = obj.listRelatives(type='joint')
if children:
obj = children[0]
else:
obj = None
break
for pSrc, pDest in zip( pointConstraints[:-1], pointConstraints[1:]):
pSrc >> pDest
for oSrc, oDest in zip( orientConstraints[:-1], orientConstraints[1:]):
oSrc >> oDest
    # &&& The None padding and influenceRange below reflect how many controls influence each spinner
bigList = [None] * influenceDist + controls + [None] * influenceDist
influenceRange = (influenceDist * 2) + 1
axis = util.identifyAxis(controlChain[0].listRelatives(type='joint')[0])
exp = []
for i, spinner in enumerate(groups):
exp.append(driverExpression( spinner, bigList[i: i + influenceRange], axis ))
expression( s=';\n'.join(exp) )
return controls, util.ConstraintResults( pointConstraints[0], orientConstraints[0] )
class SplineTwist(MetaControl):
''' Spline IK that provides control to twist individual sections. '''
ik_ = 'pdil.tool.fossil.rigging.splineTwist.buildSplineTwist'
ikInput = OrderedDict( [
('controlCountOrCrv', [
ParamInfo( 'CV count', 'How many cvs to use in auto generated curve', ParamInfo.INT, default=4, min=4 ),
ParamInfo( 'Curve', 'A nurbs curve to use for spline', ParamInfo.NODE_0 ),
] ),
('simplifyCurve',
ParamInfo( 'Simplify Curve', 'If True, the curve cvs will space out evenly, possibly altering the postions', ParamInfo.BOOL, default=True) ),
('twistInfDist',
ParamInfo( 'Twist influence', 'How many joints on one side are influenced by the twisting, zero means it is done automatically.', ParamInfo.INT, default=0, min=0) ),
('tipBend',
ParamInfo( 'Tip Bend', 'The tip control should influence the ease out bend', ParamInfo.BOOL, default=True) ),
('sourceBend',
ParamInfo( 'Source Bend', 'The source control should influence the ease in bend', ParamInfo.BOOL, default=True) ),
('matchOrient',
ParamInfo( 'Match Orient', "First and last controller are set to TrueZero'd", ParamInfo.BOOL, default=True) ),
('useLeadOrient',
ParamInfo( 'Lead Orient', 'The controls have the same orientation as the first joint', ParamInfo.BOOL, default=False) ),
('allowOffset',
ParamInfo( 'Allow Offset', 'If you Simplyify Curve, the joints will slightly shift unless you Allow Offset or the joints are straight', ParamInfo.BOOL, default=False) ),
('twistStyle',
ParamInfo( 'Twist Style', '0 = advanced, 1=x, 2=-x 3=y ...', ParamInfo.ENUM, enum=TwistStyle.asChoices(), default=TwistStyle.ADVANCED ) ),
('name',
ParamInfo( 'Name', 'Name', ParamInfo.STR, '')),
] )
fkArgs = {'translatable': True}
@classmethod
def readIkKwargs(cls, card, isMirroredSide, sideAlteration=lambda **kwargs: kwargs, kinematicType='ik'):
'''
Overriden to handle if a custom curve was given, which then needs to be duplicated, mirrored and
fed directly into the splineTwist.
'''
kwargs = cls.readKwargs(card, isMirroredSide, sideAlteration, kinematicType='ik')
if isMirroredSide:
if 'controlCountOrCrv' in kwargs and not isinstance( kwargs['controlCountOrCrv'], int ):
crv = kwargs['controlCountOrCrv']
crv = duplicate(crv)[0]
kwargs['controlCountOrCrv'] = crv
move( crv.sp, [0, 0, 0], a=True )
move( crv.rp, [0, 0, 0], a=True )
crv.sx.set(-1)
kwargs['duplicateCurve'] = False
return kwargs
def addConnectingCurve(objs):
'''
Given a list of objects, make a curve that links all of them.
'''
crv = curve( d=1, p=[(0, 0, 0)] * len(objs) )
grp = group(crv, n='connectingCurve')
for i, obj in enumerate(objs):
handle = cluster(crv.cv[i])[1]
pointConstraint( obj, handle )
handle.setParent( grp )
hide(handle)
crv.getShape().overrideEnabled.set( 1 )
crv.getShape().overrideDisplayType.set( 2 )
return grp
def countJoints(start, end):
count = 2
p = end.getParent()
while p and p != start:
p = p.getParent()
count += 1
if not p:
return 0
return count
def driverExpression( driven, controls, axis ):
'''
    The `driven` node's rotation on the given `axis` will be driven by the list of `controls`.
`controls` is a list of objects, and optional empty entries.
Example, if you have joints, A B C and controls X Y Z, you would do:
driverExpression( A, [None, X, Y] )
driverExpression( B, [X, Y, Z] )
driverExpression( C, [Y, Z, None] )
This means A will be fully influenced by X, and partially by Y.
B is fully influenced by Y and partially by X and Z.
'''
powers = calcInfluence(controls)
exp = []
for power, ctrl in zip(powers, controls):
if ctrl:
exp.append( '{0}.r{axis} * {1}'.format(ctrl, power, axis=axis) )
return '{0}.r{axis} = {1};'.format( driven, ' + '.join(exp), axis=axis )
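# Illustrative example (hypothetical control names): for controls [X, Y, Z],
# calcInfluence() below yields powers [0.5, 1.0, 0.5], so
# driverExpression(A, [X, Y, Z], 'z') would return the string:
#   'A.rz = X.rz * 0.5 + Y.rz * 1.0 + Z.rz * 0.5;'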
def calcInfluence( controls ):
'''
Given a list (Maybe change to a number?) returns a list of power falloffs.
controls can have None placeholders
power falls off to end of controls
low upper
v v
0 1 2 3 4
# Result: [0.5, 0.75, 1.0, 0.75, 0.5]
low upper
v v
0 1 2 3 4 5
# Result: [0.5, 0.75, 1.0, 1.0, 0.75, 0.5]
'''
max = len(controls)
if len(controls) % 2 == 0:
upper = int(len(controls) / 2 + 1)
lower = upper - 2
else:
upper = int(len(controls) / 2 + 1)
lower = upper - 1
delta = 1 / float(lower) * 0.5
powers = [1.0] * len(controls)
#for i, (lowCtrl, upCtrl) in enumerate(zip(controls[upper:], reversed(controls[:lower]) ), 1):
for i, (lowCtrl, upCtrl) in enumerate(zip(range(upper, max), range( lower - 1, -1, -1 ) ), 1):
power = 1 - delta * i
powers[lowCtrl] = power
powers[upCtrl] = power
return powers
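# Sanity check for the docstring examples above (None placeholders suffice,
# since only len(controls) is used):
#   calcInfluence([None] * 5)  ->  [0.5, 0.75, 1.0, 0.75, 0.5]
#   calcInfluence([None] * 6)  ->  [0.5, 0.75, 1.0, 1.0, 0.75, 0.5]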
| bsd-3-clause | -3,334,268,302,058,666,000 | 37.341732 | 194 | 0.60759 | false |
TimothyZhang/ccc_helper | test.py | 1 | 5100 | # coding=utf-8
# Copyright 2014 Timothy Zhang([email protected]).
#
# This file is part of Structer.
#
# Structer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Structer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Structer. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from ccc import Project, SceneAsset, CompareContext, Prefab
class TestCCC(TestCase):
def setUp(self):
self.project = Project('test_project')
self.project.load()
def synchronize_asset_instances(self, asset):
"""
:param Asset asset:
"""
ctx = CompareContext()
for node in asset.root.iterate_instance_roots(False):
uuid_ = node.get_prefab_uuid()
prefab = self.project.get_asset_by_uuid(uuid_)
if not prefab:
ctx.remove('miss prefab for %s' % node.path)
continue
assert isinstance(prefab, Prefab)
node.synchronize(prefab.root, ctx, True)
return ctx
def assertContextEqual(self, ctx1, ctx2):
"""
:param CompareContext ctx1:
:param CompareContext ctx2:
"""
if cmp(ctx1, ctx2) != 0:
print 'ctx1:', ctx1
print 'ctx2:', ctx2
self.assertEqual(ctx1, ctx2)
def test_ss1(self):
s1 = self.project.get_asset_by_path('testcases/ss1/s1.fire')
self.assertIsInstance(s1, SceneAsset)
ctx1 = self.synchronize_asset_instances(s1)
ctx2 = CompareContext()
ctx2.push('i1').push('_color').change('g').change('b').pop().pop().ignore('i2').ignore('i3').ignore('i4')
self.assertContextEqual(ctx1, ctx2)
def test_ss2(self):
s1 = self.project.get_asset_by_path('testcases/ss2/s1.fire')
self.assertIsInstance(s1, SceneAsset)
ctx1 = self.synchronize_asset_instances(s1)
ctx2 = CompareContext()
ctx2.push('i1').change('_opacity').push('_color').change('g').change('b').pop().push('_contentSize').\
change('width').change('height').pop().pop()
self.assertContextEqual(ctx1, ctx2)
def test_ss3(self):
s1 = self.project.get_asset_by_path('testcases/ss3/s1.fire')
self.assertIsInstance(s1, SceneAsset)
ctx1 = self.synchronize_asset_instances(s1)
ctx2 = CompareContext()
ctx2.push('i1').push('(components)').add('cc.Widget').pop().pop()
self.assertContextEqual(ctx1, ctx2)
def test_ss4(self):
s1 = self.project.get_asset_by_path('testcases/ss4/s1.fire')
ctx1 = self.synchronize_asset_instances(s1)
ctx2 = CompareContext()
self.assertContextEqual(ctx1, ctx2)
def test_ss5(self):
s1 = self.project.get_asset_by_path('testcases/ss5/s1.fire')
ctx1 = self.synchronize_asset_instances(s1)
self.assert_(not ctx1.has_changed())
def test_nested(self):
s1 = self.project.get_asset_by_path('testcases/nested/p1.prefab')
self.assertEqual(s1.depth, 1)
s1 = self.project.get_asset_by_path('testcases/nested/p2.prefab')
self.assertEqual(s1.depth, 2)
s1 = self.project.get_asset_by_path('testcases/nested/s1.fire')
self.assertEqual(s1.depth, 0)
def clear_setting(self):
self.project.ignore_components.clear()
self.project.ignore_component_properties.clear()
self.project.ignore_component_properties_if_empty.clear()
def test_cr1_cr2_cr3(self):
# cr1
self.clear_setting()
self.project.ignore_components.add('cc.Button')
# print self.project.ignore_components
s1 = self.project.get_asset_by_path('testcases/cr1_cr2_cr3/s1.fire')
ctx1 = self.synchronize_asset_instances(s1)
ctx2 = CompareContext()
self.assertContextEqual(ctx1, ctx2)
# cr2
self.clear_setting()
self.project.ignore_component_properties['cc.Button'] = {'clickEvents'}
s2 = self.project.get_asset_by_path('testcases/cr1_cr2_cr3/s2.fire')
ctx1 = self.synchronize_asset_instances(s2)
ctx2 = CompareContext()
self.assertContextEqual(ctx1, ctx2)
# cr3
self.clear_setting()
self.project.ignore_component_properties_if_empty['cc.Button'] = {'clickEvents'}
s3 = self.project.get_asset_by_path('testcases/cr1_cr2_cr3/s3.fire')
ctx1 = self.synchronize_asset_instances(s3)
ctx2 = CompareContext()
self.assertContextEqual(ctx1, ctx2)
s4 = self.project.get_asset_by_path('testcases/cr1_cr2_cr3/s4.fire')
ctx1 = self.synchronize_asset_instances(s4)
self.assert_(ctx1.has_changed())
| gpl-3.0 | -4,194,254,677,679,598,600 | 36.777778 | 113 | 0.638627 | false |
t00mas/datascience-python | classification/knearest.py | 1 | 1554 | import matplotlib
import matplotlib.pyplot as pyplot
import numpy
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
def get_iris_dataset():
iris = datasets.load_iris()
return iris.data[:, :2], iris.target
def get_knn_classifier(X, y, n_neighbors=None):
if not n_neighbors:
n_neighbors = 6
classifier = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
classifier.fit(X, y)
return classifier, n_neighbors
def get_meshgrid(X, y, h=None):
if not h:
h = .02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
return numpy.meshgrid(
numpy.arange(x_min, x_max, h), numpy.arange(y_min, y_max, h))
def predict(classifier, mesh_xx, mesh_yy):
Z = classifier.predict(numpy.c_[mesh_xx.ravel(), mesh_yy.ravel()])
return Z.reshape(mesh_xx.shape)
def plot_classified_regions(X, y, classifier, n_neighbors):
xx, yy = get_meshgrid(X, y)
Z = predict(classifier, xx, yy)
pyplot.figure()
pyplot.pcolormesh(xx, yy, Z)
# Plot also the training points
    cmap = ListedColormap(['#FFAAAA', '#AAFFAA', '#00AAFF'])
pyplot.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, alpha=0.8)
pyplot.xlim(xx.min(), xx.max())
pyplot.ylim(yy.min(), yy.max())
pyplot.title("3-Class classification (k = %i)" % (n_neighbors))
pyplot.savefig('knearest.png')
X, y = get_iris_dataset()
knn, n_neighbors = get_knn_classifier(X, y)
plot_classified_regions(X, y, knn, n_neighbors)
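# Hedged variation: a larger k smooths the decision boundary, e.g.
#   knn15, k15 = get_knn_classifier(X, y, n_neighbors=15)
#   plot_classified_regions(X, y, knn15, k15)  # note: overwrites knearest.png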
| mit | 2,858,620,498,153,865,000 | 28.320755 | 80 | 0.63964 | false |
ToontownUprising/src | toontown/ai/NewsManagerAI.py | 1 | 2153 | from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class NewsManagerAI(DistributedObjectAI):
notify = directNotify.newCategory('NewsManagerAI')
def announceGenerate(self):
DistributedObjectAI.announceGenerate(self)
self.accept('avatarEntered', self.__handleAvatarEntered)
def __handleAvatarEntered(self, avatar):
if self.air.suitInvasionManager.getInvading():
self.air.suitInvasionManager.notifyInvasionBulletin(avatar.getDoId())
def setPopulation(self, todo0):
pass
def setBingoWin(self, todo0):
pass
def setBingoStart(self):
pass
def setBingoEnd(self):
pass
def setCircuitRaceStart(self):
pass
def setCircuitRaceEnd(self):
pass
def setTrolleyHolidayStart(self):
pass
def setTrolleyHolidayEnd(self):
pass
def setTrolleyWeekendStart(self):
pass
def setTrolleyWeekendEnd(self):
pass
def setRoamingTrialerWeekendStart(self):
pass
def setRoamingTrialerWeekendEnd(self):
pass
def setInvasionStatus(self, msgType, cogType, numRemaining, skeleton):
self.sendUpdate('setInvasionStatus', args=[msgType, cogType, numRemaining, skeleton])
def setHolidayIdList(self, holidays):
self.sendUpdate('setHolidayIdList', holidays)
def holidayNotify(self):
pass
def setWeeklyCalendarHolidays(self, todo0):
pass
def getWeeklyCalendarHolidays(self):
return []
def setYearlyCalendarHolidays(self, todo0):
pass
def getYearlyCalendarHolidays(self):
return []
def setOncelyCalendarHolidays(self, todo0):
pass
def getOncelyCalendarHolidays(self):
return []
def setRelativelyCalendarHolidays(self, todo0):
pass
def getRelativelyCalendarHolidays(self):
return []
def setMultipleStartHolidays(self, todo0):
pass
def getMultipleStartHolidays(self):
return []
def sendSystemMessage(self, todo0, todo1):
pass
| mit | 2,677,477,682,247,874,600 | 21.904255 | 93 | 0.681375 | false |
arduino-org/s4t-iotronic | lib/test_pub.py | 1 | 2229 |
wampAddress = 'ws://172.17.3.139:8181/ws'
wampRealm = 's4t'
#from threading import Thread
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.wamp import ApplicationSession
from twisted.internet.defer import inlineCallbacks
# imports for testing
from twisted.internet.defer import DeferredQueue
from twisted.internet import threads
# needed by the something() generator below (assumption: autobahn's twisted sleep helper)
from autobahn.twisted.util import sleep
# Autobahn class for managing communication with remote devices
class AutobahnMRS(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Sessio attached [Connect to WAMP Router] Sub")
def onMessage(*args):
print args
try:
yield self.subscribe(onMessage, 'test')
print ("Subscribed to topic: test")
except Exception as e:
print("Exception:" +e)
# Autobahn class for managing internal communication
class AutobahnIM(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Sessio attached [Connect to WAMP Router] Pub")
try:
yield self.publish('test','YOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
print ("Publish to topic: test")
except Exception as e:
print("Exception:" +e)
# Class for managing communication with remote devices
class ManageRemoteSystem:
def __init__(self):
self.runner = ApplicationRunner(url= wampAddress, realm = wampRealm)
def start(self):
self.runner.run(AutobahnMRS, start_reactor=False);
# Class for managing communication internal to ManageRemoteSystem
class InternalMessages:
def __init__(self):
self.runner = ApplicationRunner(url= wampAddress, realm = wampRealm)
def start(self):
self.runner.run(AutobahnIM, start_reactor=False);
# Main class for the iotronic service
#class S4tServer:
def something():
count = 0
while True:
print('something:', count)
yield sleep(1)
count+=1
if __name__ == '__main__':
#import multiprocessing
server = ManageRemoteSystem()
#sendMessage = InternalMessages()
server.start()
#sendMessage.start()
from twisted.internet import reactor
reactor.run()
#thread1 = Thread(target = reactor.run())
#thread2 = Thread(target = something())
#thread2.start()
#thread1.start()
#thread1.daemon = True
#thread2.daemon = True
#thread2.join()
#thread1.join()
| apache-2.0 | -9,203,456,461,136,472,000 | 21.979381 | 77 | 0.746074 | false |
pitunti/alfaPitunti | plugin.video.alfa/channels/tupornotv.py | 1 | 10715 | # -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Pendientes de Votación", action="novedades",
url="http://tuporno.tv/pendientes"))
itemlist.append(
Item(channel=item.channel, title="Populares", action="masVistos", url="http://tuporno.tv/", folder=True))
itemlist.append(
Item(channel=item.channel, title="Categorias", action="categorias", url="http://tuporno.tv/categorias/",
folder=True))
itemlist.append(Item(channel=item.channel, title="Videos Recientes", action="novedades",
url="http://tuporno.tv/videosRecientes/", folder=True))
itemlist.append(Item(channel=item.channel, title="Top Videos (mas votados)", action="masVotados",
url="http://tuporno.tv/topVideos/", folder=True))
itemlist.append(Item(channel=item.channel, title="Nube de Tags", action="categorias", url="http://tuporno.tv/tags/",
folder=True))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def novedades(item):
logger.info()
url = item.url
# ------------------------------------------------------
    # Download the page
# ------------------------------------------------------
data = scrapertools.cachePage(url)
# logger.info(data)
# ------------------------------------------------------
    # Extract the entries
# ------------------------------------------------------
    # new videos section
'''
<table border="0" cellpadding="0" cellspacing="0" ><tr><td align="center" width="100%" valign="top" height="160px">
<a href="/videos/cogiendo-en-el-bosque"><img src="imagenes/videos//c/o/cogiendo-en-el-bosque_imagen2.jpg" alt="Cogiendo en el bosque" border="0" align="top" /></a>
<h2><a href="/videos/cogiendo-en-el-bosque">Cogiendo en el bosque</a></h2>
'''
patronvideos = '<div class="relative">(.*?)</div><div class="video'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
# if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
        # Title
try:
scrapedtitle = re.compile('title="(.+?)"').findall(match)[0]
except:
scrapedtitle = ''
try:
scrapedurl = re.compile('href="(.+?)"').findall(match)[0]
scrapedurl = urlparse.urljoin(url, scrapedurl)
except:
continue
try:
scrapedthumbnail = re.compile('src="(.+?)"').findall(match)[0]
scrapedthumbnail = urlparse.urljoin(url, scrapedthumbnail)
except:
scrapedthumbnail = ''
scrapedplot = ""
try:
duracion = re.compile('<div class="duracion">(.+?)<').findall(match)[0]
except:
try:
duracion = re.compile('\((.+?)\)<br').findall(match[3])[0]
except:
duracion = ""
# logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], duracion=["+duracion+"]")
        # Add it to the XBMC listing
# trozos = scrapedurl.split("/")
# id = trozos[len(trozos)-1]
# videos = "http://149.12.64.129/videoscodiH264/"+id[0:1]+"/"+id[1:2]+"/"+id+".flv"
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
# ------------------------------------------------------
# Extrae el paginador
# ------------------------------------------------------
# <a href="/topVideos/todas/mes/2/" class="enlace_si">Siguiente </a>
patronsiguiente = '<a href="(.+?)" class="enlace_si">Siguiente </a>'
siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
if len(siguiente) > 0:
scrapedurl = urlparse.urljoin(url, siguiente[0])
itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))
return itemlist
def masVistos(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True))
itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True))
itemlist.append(
Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True))
itemlist.append(
Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True))
return itemlist
def categorias(item):
logger.info()
url = item.url
# ------------------------------------------------------
    # Download the page
# ------------------------------------------------------
data = scrapertools.cachePage(url)
# logger.info(data)
# ------------------------------------------------------
    # Extract the entries
# ------------------------------------------------------
    # categories section
    # Pattern for the entries
if url == "http://tuporno.tv/categorias/":
        patronvideos = '<li><a href="([^"]+)"'  # URL
        patronvideos += '>([^<]+)</a></li>'  # TITLE
    else:
        patronvideos = '<a href="(.tags[^"]+)"'  # URL
        patronvideos += ' class="[^"]+">([^<]+)</a>'  # TITLE
matches = re.compile(patronvideos, re.DOTALL).findall(data)
# if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
if match[1] in ["SexShop", "Videochat", "Videoclub"]:
continue
        # Title
scrapedtitle = match[1]
scrapedurl = urlparse.urljoin(url, match[0])
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        # Add it to the XBMC listing
itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle.capitalize(), url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def masVotados(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/topVideos/todas/hoy",
folder=True))
itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades",
url="http://tuporno.tv/topVideos/todas/recientes", folder=True))
itemlist.append(
Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/topVideos/todas/semana",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/topVideos/todas/mes",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/topVideos/todas/ano",
folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
else:
texto = item.extra.replace(" ", "+")
item.url = "http://tuporno.tv/buscador/?str=" + texto
try:
return getsearch(item)
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def getsearch(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
patronvideos = '<div class="relative"><a href="(.videos[^"]+)"[^>]+><img.+?src="([^"]+)" alt="(.+?)" .*?<div class="duracion">(.+?)</div></div></div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
itemlist = []
for match in matches:
            # Title
scrapedtitle = match[2].replace("<b>", "")
scrapedtitle = scrapedtitle.replace("</b>", "")
scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
scrapedplot = ""
duracion = match[3]
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
'''<a href="/buscador/?str=busqueda&desde=HV_PAGINA_SIGUIENTE" class="enlace_si">Siguiente </a>'''
patronsiguiente = '<a href="([^"]+)" class="enlace_si">Siguiente </a>'
siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
if len(siguiente) > 0:
patronultima = '<!--HV_SIGUIENTE_ENLACE'
ultpagina = re.compile(patronultima, re.DOTALL).findall(data)
scrapertools.printMatches(siguiente)
if len(ultpagina) == 0:
scrapedurl = urlparse.urljoin(item.url, siguiente[0])
itemlist.append(
Item(channel=item.channel, action="getsearch", title="!Next page", url=scrapedurl, folder=True))
return itemlist
def play(item):
logger.info()
itemlist = []
    # Read the video page
data = scrapertools.cachePage(item.url)
codVideo = scrapertools.get_match(data, 'body id="([^"]+)"')
logger.info("codVideo=" + codVideo)
    # Read the page with the video code
# http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146
url = "http://tuporno.tv/flvurl.php?codVideo=" + codVideo + "&v=MAC%2011,5,502,146"
data = scrapertools.cachePage(url)
logger.info("data=" + data)
kpt = scrapertools.get_match(data, "kpt\=(.+?)\&")
logger.info("kpt=" + kpt)
    # Decode it
import base64
url = base64.decodestring(kpt)
logger.info("url=" + url)
itemlist.append(
Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot,
server="Directo", folder=False))
return itemlist
| gpl-3.0 | 8,343,848,909,474,973,000 | 39.549242 | 167 | 0.567865 | false |
designcc/django-ccbasket | ccbasket_testproject/shop/views.py | 1 | 1061 | # -*- coding: utf-8 -*-
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.response import TemplateResponse
from models import SimpleProduct, ComplexProduct
logger = logging.getLogger('ccbasket')
def home(request):
return render_to_response('shop/home.html', {
}, context_instance=RequestContext(request))
def product(request, pk, model=SimpleProduct, template='shop/product.html'):
product = model.objects.get(pk=pk)
return render_to_response(template, {
'product': product
}, context_instance=RequestContext(request))
def index(request):
simple_products = SimpleProduct.objects.all()
complex_products = ComplexProduct.objects.all()
products = []
for product in simple_products:
products.append(product)
for product in complex_products:
products.append(product)
return render_to_response('shop/index.html', {
'products': products,
}, context_instance=RequestContext(request))
| bsd-3-clause | -8,419,915,165,657,524,000 | 26.921053 | 76 | 0.71065 | false |
cea-hpc/shine | lib/Shine/Configuration/Configuration.py | 1 | 8263 | # Configuration.py -- Configuration container
# Copyright (C) 2007-2014 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from ClusterShell.NodeSet import NodeSet
from Shine.Configuration.FileSystem import FileSystem, Target, Routers, Clients
from Shine.Configuration.Exceptions import ConfigException
class Configuration:
def __init__(self):
"""FS configuration initializer."""
self.debug = False
self._fs = None
@classmethod
def load_from_cache(cls, fsname):
conf = Configuration()
conf._fs = FileSystem.load_from_fsname(fsname)
return conf
@classmethod
def load_from_model(cls, lmf):
conf = Configuration()
conf._fs = FileSystem(lmf)
return conf
@classmethod
def create_from_model(cls, lmf, update_mode=False):
conf = Configuration()
conf._fs = FileSystem.create_from_model(lmf, update_mode=update_mode)
return conf
def close(self):
self._fs.close()
###
def get_nid(self, who):
return self._fs.get_nid(who)
def get_target_mgt(self):
tgt_cf_list = self._fs.get('mgt')
return Target('MGT', tgt_cf_list[0])
def get_target_mdt(self):
tgt_cf_list = self._fs.get('mdt')
return Target('MDT', tgt_cf_list[0])
def iter_targets_ost(self):
tgt_cf_list = self._fs.get('ost')
for t in tgt_cf_list:
yield Target('OST', t)
def iter_targets(self):
"""
Return a generator over all FS targets.
"""
for target_type in [ 'mgt', 'mdt', 'ost' ]:
if target_type not in self._fs.model:
continue
tgt_cf_list = self._fs.get(target_type)
for t in tgt_cf_list:
yield Target(target_type, t)
def get_target_from_tag_and_type(self, target_tag, target_type):
"""
        Retrieve a target by looking it up by its type and its tag.
"""
target = None
        if target_type in ('MGT', 'MGS'):
# The target is the MGT
target = self.get_target_mgt()
elif target_type == 'MDT':
# The target is the MDT
target = self.get_target_mdt()
elif target_type == 'OST':
# The target is an OST. Walk through the list of
# OSTs to retrieve the right one.
for current_ost in self.iter_targets_ost():
if current_ost.get_tag() == target_tag:
# The ost tag match the searched one.
# save the target and break the loop
target = current_ost
break
else:
# The target type is currently not supported by the
# configuration
raise ConfigException("Unknown target type %s" %target_type)
return target
def get_default_mount_path(self):
"""
Return the default client mount path or raise a ConfigException
if it does not exist.
"""
if not 'mount_path' in self._fs.model:
raise ConfigException("mount_path not specified")
return self._fs.get('mount_path')
def iter_clients(self):
"""
Iterate over (node, mount_path, mount_options, subdir)
"""
if not 'client' in self._fs.model:
return
for clnt in [Clients(clnt) for clnt in self._fs.get('client')]:
assert '[' not in clnt.get_nodes()
path = clnt.get_mount_path() or self.get_default_mount_path()
opts = clnt.get_mount_options() or self.get_default_mount_options()
subdir = clnt.get('subdir')
yield clnt.get_nodes(), path, opts, subdir
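    # Hedged example of consuming the generator above, where each tuple is
    # (nodes, mount_path, mount_options, subdir):
    #   for nodes, path, opts, subdir in conf.iter_clients():
    #       print "%s -> %s (%s)" % (nodes, path, opts)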
def iter_routers(self):
"""
Iterate over (node)
"""
if 'router' in self._fs.model:
for elem in self._fs.get('router'):
rtr = Routers(elem)
yield rtr.get_nodes()
# General FS getters
#
def get_fs_name(self):
return self._fs.get('fs_name')
def get_cfg_filename(self):
"""
Return FS xmf file path.
"""
return self._fs.xmf_path
def get_description(self):
return self._fs.get('description')
def has_quota(self):
"""
Return if quota has been enabled in the configuration file.
"""
return self._fs.get('quota')
def get_quota_type(self):
return self._fs.get('quota_type')
def get_quota_bunit(self):
return self._fs.get('quota_bunit')
def get_quota_iunit(self):
return self._fs.get('quota_iunit')
def get_quota_btune(self):
return self._fs.get('quota_btune')
def get_quota_itune(self):
return self._fs.get('quota_itune')
def get_mount_path(self):
return self._fs.get('mount_path')
def get_default_mount_options(self):
return self._fs.get('mount_options')
def get_target_mount_options(self, target):
return self._fs.get('%s_mount_options' % str(target).lower())
def get_target_mount_path(self, target):
return self._fs.get('%s_mount_path' % str(target).lower())
def get_target_format_params(self, target):
return self._fs.get('%s_format_params' % str(target).lower())
def get_target_mkfs_options(self, target):
return self._fs.get('%s_mkfs_options' % str(target).lower())
# Stripe info getters
#
def get_stripecount(self):
return self._fs.get('stripe_count', None)
def get_stripesize(self):
return self._fs.get('stripe_size', None)
# Target status setters
#
def register_targets(self, targets=None):
"""
Set filesystem targets as 'in use'.
If `targets' is not specified, all managed targets from the
filesystem will be used.
        These targets can no longer be used for other filesystems.
"""
if not targets:
targets = []
for tgttype in ('mgt', 'mdt', 'ost'):
if tgttype not in self._fs.model:
continue
for target in self._fs.get(tgttype):
if target.get('mode') == 'managed':
targets.append(Target(tgttype, target))
for target in targets:
self._fs.register_target(target)
def unregister_targets(self, targets=None):
"""
Set filesystem targets as available in the backend.
If `targets' is not specified, all managed targets from the
filesystem will be used.
        These targets can now be reused.
"""
if not targets:
targets = []
for tgttype in ('mgt', 'mdt', 'ost'):
if tgttype not in self._fs.model:
continue
for target in self._fs.get(tgttype):
if target.get('mode') == 'managed':
targets.append(Target(tgttype, target))
for target in targets:
self._fs.unregister_target(target)
def set_debug(self, debug):
self.debug = debug
def register_fs(self):
"""
        Register the file system configuration in the backend.
"""
self._fs.register()
def unregister_fs(self):
"""
This function aims to unregister the file system configuration
from the backend.
"""
self._fs.unregister()
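
# Illustrative read-only walk over a parsed configuration (hypothetical
# caller code; `fs_conf` stands for an instance of the class above):
#
#     for nodes, path, opts, subdir in fs_conf.iter_clients():
#         print nodes, path, opts, subdir
#     for nodes in fs_conf.iter_routers():
#         print nodes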
| gpl-2.0 | -7,231,906,994,213,051,000 | 30.418251 | 79 | 0.57461 | false |
david-abel/simple_rl | simple_rl/tasks/maze_1d/Maze1DPOMDPClass.py | 1 | 2420 | # Python imports.
import random
# Other imports.
from simple_rl.pomdp.POMDPClass import POMDP
from simple_rl.tasks.maze_1d.Maze1DStateClass import Maze1DState
class Maze1DPOMDP(POMDP):
''' Class for a 1D Maze POMDP '''
ACTIONS = ['west', 'east']
OBSERVATIONS = ['nothing', 'goal']
def __init__(self):
self._states = [Maze1DState('left'), Maze1DState('middle'), Maze1DState('right'), Maze1DState('goal')]
# Initial belief is a uniform distribution over states
        b0 = {state: 0.25 for state in self._states}
POMDP.__init__(self, Maze1DPOMDP.ACTIONS, Maze1DPOMDP.OBSERVATIONS, self._transition_func, self._reward_func, self._observation_func, b0)
def _transition_func(self, state, action):
'''
Args:
state (Maze1DState)
action (str)
Returns:
next_state (Maze1DState)
'''
if action == 'west':
if state.name == 'left':
return Maze1DState('left')
if state.name == 'middle':
return Maze1DState('left')
if state.name == 'right':
return Maze1DState('goal')
if state.name == 'goal':
return Maze1DState(random.choice(['left', 'middle', 'right']))
if action == 'east':
if state.name == 'left':
return Maze1DState('middle')
if state.name == 'middle':
return Maze1DState('goal')
if state.name == 'right':
return Maze1DState('right')
if state.name == 'goal':
return Maze1DState(random.choice(['left', 'middle', 'right']))
raise ValueError('Invalid state: {} action: {} in 1DMaze'.format(state, action))
def _observation_func(self, state, action):
next_state = self._transition_func(state, action)
return 'goal' if next_state.name == 'goal' else 'nothing'
def _reward_func(self, state, action, next_state):
# next_state = self._transition_func(state, action)
observation = self._observation_func(state, action)
return (1. - self.step_cost) if (next_state.name == observation == 'goal') else (0. - self.step_cost)
def is_in_goal_state(self):
return self.cur_state.name == 'goal'
if __name__ == '__main__':
maze_pomdp = Maze1DPOMDP()
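    # Hypothetical smoke test (not part of the original file): one east step
    # from 'left' lands on 'middle', and a further east step is observed as
    # reaching the goal.
    next_state = maze_pomdp._transition_func(Maze1DState('left'), 'east')
    obs = maze_pomdp._observation_func(next_state, 'east')
    print('%s -> %s' % (next_state.name, obs))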
| apache-2.0 | 2,584,774,365,799,531,000 | 35.666667 | 145 | 0.579752 | false |
tenable/Tenable.io-SDK-for-Python | tenable_io/api/plugins.py | 1 | 1572 | from tenable_io.api.base import BaseApi
from tenable_io.api.models import PluginDetails, PluginFamilyDetails, PluginFamilyList
class PluginsApi(BaseApi):
def families(self, include_all=None):
"""Return list of plugin families.
:param include_all: Whether or not to include all plugins. Defaults to be less inclusive.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginFamilyList`.
"""
params = {'all': include_all}
response = self._client.get('plugins/families', params={k: v for (k, v) in params.items() if v})
return PluginFamilyList.from_json(response.text)
def family_details(self, family_id):
"""Return plugin family details.
:param family_id: Plugin family ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginFamilyDetails`.
"""
response = self._client.get('plugins/families/%(id)s', path_params={'id': family_id})
return PluginFamilyDetails.from_json(response.text)
def plugin_details(self, plugin_id):
"""Return plugin details.
:param plugin_id: Plugin ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginDetails`.
"""
response = self._client.get('plugins/plugin/%(id)s', path_params={'id': plugin_id})
return PluginDetails.from_json(response.text)
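
# Illustrative usage sketch (hypothetical wiring; the client construction and
# plugin id below are assumptions, not part of this module):
#
#     from tenable_io.client import TenableIOClient
#     api = PluginsApi(TenableIOClient())
#     families = api.families(include_all=True)
#     details = api.plugin_details(19506)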
| mit | 4,282,903,785,587,915,000 | 42.666667 | 104 | 0.67112 | false |
anarchivist/pyflag | src/plugins/Flash/AdvancedCommands.py | 1 | 13927 | """ These Flash commands allow more sophisticated operations, most of
which may not be needed by most users. Some operations are
specifically designed for testing and have little use in practice.
"""
import pyflag.pyflagsh as pyflagsh
import pyflag.Registry as Registry
import pyflag.DB as DB
import fnmatch
import pyflag.FileSystem as FileSystem
import pyflag.Scanner as Scanner
import time, types
import pyflag.pyflaglog as pyflaglog
import BasicCommands
import pyflag.ScannerUtils as ScannerUtils
import pyflag.conf
config=pyflag.conf.ConfObject()
class scan_path(pyflagsh.command):
""" This takes a path as an argument and runs the specified scanner on the path
this might be of more use than specifying inodes for the average user since if you load
two disk images, then you might have /disk1 and /disk2 and want to just run scans over
one of them, which is simpler to specify using /disk1. """
def help(self):
return "scan VFSPath [list of scanners]: Scans the VFS path with the scanners specified"
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['path']
def wait_for_scan(self, cookie):
""" Waits for scanners to complete """
pdbh = DB.DBO()
pdbh.check_index('jobs','cookie')
## Often this process owns a worker as well. In that case we can wake it up:
import pyflag.Farm as Farm
Farm.wake_workers()
## Wait until there are no more jobs left.
while 1:
pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r",
(cookie,
self.environment._CASE))
row = pdbh.fetch()
if row['total']==0: break
time.sleep(1)
def execute(self):
scanners=[]
if len(self.args)<2:
yield self.help()
return
elif type(self.args[1]) == types.ListType:
scanners = self.args[1]
else:
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
## Assume that people always want recursive - I think this makes sense
path = self.args[0]
if not path.endswith("*"):
path = path + "*"
## FIXME For massive images this should be broken up, as in the old GUI method
dbh=DB.DBO(self.environment._CASE)
dbh.execute("select inode.inode from inode join file on file.inode = inode.inode where file.path rlike %r", fnmatch.translate(path))
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
## This is a cookie used to identify our requests so that we
## can check they have been done later.
cookie = int(time.time())
for row in dbh:
inode = row['inode']
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = row['inode'],
arg3 = ','.join(scanners),
cookie=cookie,
                )
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
self.wait_for_scan(cookie)
yield "Scanning complete"
import pyflag.FlagFramework as FlagFramework
class init_flag_db(pyflagsh.command):
""" Creates the main flag db if needed """
def execute(self):
try:
dbh = DB.DBO()
except:
dbh = DB.DBO('mysql')
dbh.execute("create database `%s`" % config.FLAGDB)
dbh = DB.DBO()
FlagFramework.post_event("init_default_db", None)
yield "Done"
class delete_iosource(pyflagsh.command):
""" Deletes an iosource from the current case """
def complete(self, text, state):
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(name,1,%r) as abbrev,name from iosources where name like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['name']
def execute(self):
for iosource in self.args:
dbh = DB.DBO(self.environment._CASE)
dbh2 = dbh.clone()
dbh.delete('inode', where=DB.expand("inode like 'I%s|%%'", iosource))
dbh.execute("select * from filesystems where iosource = %r", iosource)
for row in dbh:
dbh2.delete('file', where=DB.expand("path like '%s%%'", iosource))
dbh.delete("iosources", where=DB.expand("name=%r", iosource))
yield "Removed IOSource %s" % iosource
class scan(pyflagsh.command):
""" Scan a glob of inodes with a glob of scanners """
def help(self):
return "scan inode [list of scanners]: Scans the inodes with the scanners specified"
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(inode,1,%r) as abbrev,inode from inode where inode like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['inode']
def execute(self):
if len(self.args)<2:
yield self.help()
return
## Try to glob the inode list:
dbh=DB.DBO(self.environment._CASE)
dbh.execute("select inode from inode where inode rlike %r",fnmatch.translate(self.args[0]))
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
## This is a cookie used to identify our requests so that we
## can check they have been done later.
cookie = int(time.time())
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
scanners = ScannerUtils.fill_in_dependancies(scanners)
for row in dbh:
inode = row['inode']
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = row['inode'],
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
if self.environment.interactive:
self.wait_for_scan(cookie)
yield "Scanning complete"
def wait_for_scan(self, cookie):
""" Waits for scanners to complete """
pdbh = DB.DBO()
## Often this process owns a worker as well. In that case we can wake it up:
import pyflag.Farm as Farm
Farm.wake_workers()
## Wait until there are no more jobs left.
while 1:
pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r", (cookie,
self.environment._CASE))
row = pdbh.fetch()
if row and row['total']==0: break
time.sleep(1)
class scan_file(scan,BasicCommands.ls):
""" Scan a file in the VFS by name """
def help(self):
return "scan file [list of scanners]: Scan the file with the scanners specified "
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['path']
def execute(self):
if len(self.args)<2:
yield self.help()
return
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
cookie = int(time.time())
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
for path in self.glob_files(self.args[:1]):
path, inode, inode_id = self.environment._FS.lookup(path = path)
## This is a cookie used to identify our requests so that we
## can check they have been done later.
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = inode,
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
if 1 or self.environment.interactive:
self.wait_for_scan(cookie)
yield "Scanning complete"
##
## This allows people to reset based on the VFS path
##
class scanner_reset_path(scan):
""" Reset all files under a specified path """
def help(self):
return "scanner_reset_path path [list of scanners]: Resets the inodes under the path given with the scanners specified"
def execute(self):
if len(self.args)<2:
yield self.help()
return
scanners = []
if type(self.args[1]) == types.ListType:
scanners = self.args[1]
else:
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
print "GETTING FACTORIES"
factories = Scanner.get_factories(self.environment._CASE, scanners)
print "OK NOW RESETING EM"
for f in factories:
f.reset_entire_path(self.args[0])
print "HOKAY"
yield "Reset Complete"
## There is little point in distributing this because its very quick anyway.
class scanner_reset(scan):
""" Reset multiple inodes as specified by a glob """
def help(self):
return "reset inode [list of scanners]: Resets the inodes with the scanners specified"
def execute(self):
if len(self.args)<2:
yield self.help()
return
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
factories = Scanner.get_factories(self.environment._CASE, scanners)
for f in factories:
f.multiple_inode_reset(self.args[0])
yield "Resetting complete"
class load_and_scan(scan):
""" Load a filesystem and scan it at the same time """
def help(self):
return """load_and_scan iosource mount_point fstype [list of scanners]:
Loads the iosource into the right mount point and scans all
new inodes using the scanner list. This allows scanning to
start as soon as VFS inodes are produced and before the VFS is
fully populated.
"""
def complete(self, text,state):
if len(self.args)>4 or len(self.args)==4 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
elif len(self.args)>3 or len(self.args)==3 and not text:
fstypes = [ x for x in Registry.FILESYSTEMS.class_names if x.startswith(text) ]
return fstypes[state]
elif len(self.args)>2 or len(self.args)==2 and not text:
return
elif len(self.args)>1 or len(self.args)==1 and not text:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(value,1,%r) as abbrev,value from meta where property='iosource' and value like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['value']
def execute(self):
if len(self.args)<3:
yield self.help()
return
iosource=self.args[0]
mnt_point=self.args[1]
filesystem=self.args[2]
query = {}
dbh = DB.DBO()
dbh.mass_insert_start('jobs')
## This works out all the scanners that were specified:
tmp = []
for i in range(3,len(self.args)):
## Is it a parameter?
if "=" in self.args[i]:
prop,value = self.args[i].split("=",1)
query[prop] = value
else:
tmp.extend([x for x in fnmatch.filter(
Registry.SCANNERS.scanners, self.args[i]) ])
scanners = [ ]
for item in tmp:
if item not in scanners:
scanners.append(item)
## Load the filesystem:
try:
fs = Registry.FILESYSTEMS.dispatch(filesystem)
except KeyError:
yield "Unable to find a filesystem of %s" % filesystem
return
fs=fs(self.environment._CASE, query)
fs.cookie = int(time.time())
fs.load(mnt_point, iosource, scanners)
## Wait for all the scanners to finish
self.wait_for_scan(fs.cookie)
yield "Loading complete"
| gpl-2.0 | -4,366,271,114,927,967,700 | 36.438172 | 178 | 0.575644 | false |
mobarski/sandbox | parallel/p7cat.py | 1 | 1316 | ## p7cat.py - parallel concatenation
## (c) 2017 by mobarski (at) gmail (dot) com
## licence: MIT
## version: x1
from __future__ import print_function
import sys
import os
from multiprocessing import Process
from time import time
def write_part(path_in, path_out, offset, blocksize=4096):
fi = open(path_in,'rb')
fo = open(path_out,'r+b')
fo.seek(offset)
while True:
block = fi.read(blocksize)
fo.write(block)
if len(block)<blocksize: break
fi.close()
fo.close()
if __name__ == "__main__":
t0 = time()
print("\n\tP7 CONCAT START\n")
outpath = sys.argv[1]
filenames = sys.argv[2:]
#print('\tOUT',outpath)
#print('\tIN\n',filenames)
meta = {} # filename -> size, offset
offset = 0
for path in filenames:
size = os.path.getsize(path)
meta[path] = (size,offset)
offset += size
# allocate disk space
out = open(outpath,'wb')
out.seek(offset-1)
out.write(b'\x00')
out.close()
proc = {}
for path in filenames:
size,offset = meta[path]
p = Process(target=write_part, args=(path, outpath, offset))
p.start()
print("\tBEGIN pid:{0} size:{2} offset:{1}".format(p.pid,offset,size))
proc[path] = p
sys.stdout.flush()
for path in filenames:
p = proc[path]
p.join()
print("\tEND pid:{0}".format(p.pid))
print("\n\tRUN_TIME_TOTAL:{0:.1f}s\n".format(time()-t0))
| mit | -6,190,227,077,822,655,000 | 20.933333 | 75 | 0.647416 | false |
stoq/stoqdrivers | docs/fiscal-driver-template.py | 1 | 5263 | #
# Stoqdrivers template driver
#
# Copyright (C) 2007 Async Open Source <http://www.async.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
import datetime
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.enum import TaxType
from stoqdrivers.interfaces import ICouponPrinter
from stoqdrivers.printers.capabilities import Capability
from stoqdrivers.printers.fiscal import SintegraData
from stoqdrivers.serialbase import SerialBase
from stoqdrivers.translation import stoqdrivers_gettext
_ = stoqdrivers_gettext
@implementer(ICouponPrinter)
class TemplateDriver(SerialBase):
supported = True
model_name = "Template Driver"
coupon_printer_charset = "ascii"
def __init__(self, port, consts=None):
SerialBase.__init__(self, port)
#
# This implements the ICouponPrinter Interface
#
# Coupon methods
def coupon_identify_customer(self, customer, address, document):
pass
def coupon_open(self):
pass
def coupon_cancel(self):
pass
def coupon_close(self, message):
coupon_id = 123
return coupon_id
def coupon_add_item(self, code, description, price, taxcode,
quantity, unit, discount, markup, unit_desc):
item_id = 123
return item_id
def coupon_cancel_item(self, item_id):
pass
def coupon_add_payment(self, payment_method, value, description):
return Decimal("123")
def coupon_totalize(self, discount, markup, taxcode):
return Decimal("123")
# Till / Daily flow
def summarize(self):
        # X reading (till summary report)
pass
def close_till(self, previous_day):
        # Z reduction (daily till closing)
pass
def till_add_cash(self, value):
        # Add cash to the till ("suprimento")
pass
def till_remove_cash(self, value):
        # Remove cash from the till ("sangria")
pass
def till_read_memory(self, start, end):
        # Fiscal memory reading by date range
pass
def till_read_memory_by_reductions(self, start, end):
        # Fiscal memory reading by Z reductions
pass
# Introspection
def get_capabilities(self):
return dict(
item_code=Capability(max_len=13),
item_id=Capability(digits=4),
items_quantity=Capability(min_size=1, digits=4, decimals=3),
item_price=Capability(digits=6, decimals=2),
item_description=Capability(max_len=29),
payment_value=Capability(digits=12, decimals=2),
promotional_message=Capability(max_len=320),
payment_description=Capability(max_len=48),
customer_name=Capability(max_len=30),
customer_id=Capability(max_len=28),
customer_address=Capability(max_len=80),
add_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
remove_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
)
def get_constants(self):
return self._consts
def get_tax_constants(self):
constants = []
constants.append((TaxType.CUSTOM,
'01',
Decimal('18.00')))
constants.append((TaxType.CUSTOM,
'02',
Decimal('25.00')))
constants.extend([
(TaxType.SUBSTITUTION, 'FF', None),
(TaxType.EXEMPTION, 'II', None),
(TaxType.NONE, 'NN', None),
])
return constants
def get_payment_constants(self):
methods = []
methods.append(('01', 'DINHEIRO'))
methods.append(('02', 'CHEQUE'))
return methods
def get_sintegra(self):
taxes = []
taxes.append(('2500', Decimal("0")))
taxes.append(('1800', Decimal("0")))
taxes.append(('CANC', Decimal("0")))
taxes.append(('DESC', Decimal("0")))
taxes.append(('I', Decimal("0")))
taxes.append(('N', Decimal("0")))
taxes.append(('F', Decimal("0")))
return SintegraData(
opening_date=datetime.date(2000, 1, 1),
            serial=self.get_serial(),
serial_id='001',
coupon_start=0,
coupon_end=100,
cro=230,
crz=1232,
coo=320,
period_total=Decimal("1123"),
total=Decimal("2311123"),
taxes=taxes)
# Device detection, asynchronous
def query_status(self):
return 'XXX'
def status_reply_complete(self, reply):
return len(reply) == 23
def get_serial(self):
return 'ABC12345678'
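
# Illustrative wiring of this template (a sketch with hypothetical device
# settings; `SerialPort` is assumed to come from stoqdrivers.serialbase):
#
#     from stoqdrivers.serialbase import SerialPort
#     printer = TemplateDriver(SerialPort('/dev/ttyS0'))
#     printer.coupon_open()
#     printer.coupon_add_item('123', 'Widget', Decimal('10.00'), '01',
#                             Decimal('1'), 'un', Decimal('0'), Decimal('0'),
#                             '')
#     printer.coupon_totalize(Decimal('0'), Decimal('0'), '01')
#     printer.coupon_add_payment('01', Decimal('10.00'), 'DINHEIRO')
#     printer.coupon_close('')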
| lgpl-2.1 | -2,792,795,015,464,589,300 | 27.737705 | 78 | 0.610953 | false |
waheedahmed/edx-platform | openedx/core/djangoapps/api_admin/views.py | 1 | 9646 | """Views for API management."""
import logging
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse_lazy, reverse
from django.http.response import JsonResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView
from oauth2_provider.generators import generate_client_secret, generate_client_id
from oauth2_provider.models import get_application_model
from oauth2_provider.views import ApplicationRegistration
from slumber.exceptions import HttpNotFoundError
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.api_admin.decorators import require_api_access
from openedx.core.djangoapps.api_admin.forms import ApiAccessRequestForm, CatalogForm
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest, Catalog
from openedx.core.djangoapps.api_admin.utils import course_discovery_api_client
log = logging.getLogger(__name__)
Application = get_application_model() # pylint: disable=invalid-name
class ApiRequestView(CreateView):
"""Form view for requesting API access."""
form_class = ApiAccessRequestForm
template_name = 'api_admin/api_access_request_form.html'
success_url = reverse_lazy('api_admin:api-status')
def get(self, request):
"""
If the requesting user has already requested API access, redirect
them to the client creation page.
"""
if ApiAccessRequest.api_access_status(request.user) is not None:
return redirect(reverse('api_admin:api-status'))
return super(ApiRequestView, self).get(request)
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.site = get_current_site(self.request)
return super(ApiRequestView, self).form_valid(form)
class ApiRequestStatusView(ApplicationRegistration):
"""View for confirming our receipt of an API request."""
success_url = reverse_lazy('api_admin:api-status')
def get(self, request, form=None): # pylint: disable=arguments-differ
"""
If the user has not created an API request, redirect them to the
request form. Otherwise, display the status of their API
request. We take `form` as an optional argument so that we can
display validation errors correctly on the page.
"""
if form is None:
form = self.get_form_class()()
user = request.user
try:
api_request = ApiAccessRequest.objects.get(user=user)
except ApiAccessRequest.DoesNotExist:
return redirect(reverse('api_admin:api-request'))
try:
application = Application.objects.get(user=user)
except Application.DoesNotExist:
application = None
# We want to fill in a few fields ourselves, so remove them
# from the form so that the user doesn't see them.
for field in ('client_type', 'client_secret', 'client_id', 'authorization_grant_type'):
form.fields.pop(field)
return render_to_response('api_admin/status.html', {
'status': api_request.status,
'api_support_link': settings.API_DOCUMENTATION_URL,
'api_support_email': settings.API_ACCESS_MANAGER_EMAIL,
'form': form,
'application': application,
})
def get_form(self, form_class=None):
form = super(ApiRequestStatusView, self).get_form(form_class)
# Copy the data, since it's an immutable QueryDict.
copied_data = form.data.copy()
# Now set the fields that were removed earlier. We give them
# confidential client credentials, and generate their client
# ID and secret.
copied_data.update({
'authorization_grant_type': Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_CONFIDENTIAL,
'client_secret': generate_client_secret(),
'client_id': generate_client_id(),
})
form.data = copied_data
return form
def form_valid(self, form):
# Delete any existing applications if the user has decided to regenerate their credentials
Application.objects.filter(user=self.request.user).delete()
return super(ApiRequestStatusView, self).form_valid(form)
def form_invalid(self, form):
return self.get(self.request, form)
@require_api_access
def post(self, request):
return super(ApiRequestStatusView, self).post(request)
class ApiTosView(TemplateView):
"""View to show the API Terms of Service."""
template_name = 'api_admin/terms_of_service.html'
class CatalogSearchView(View):
"""View to search for catalogs belonging to a user."""
def get(self, request):
"""Display a form to search for catalogs belonging to a user."""
return render_to_response('api_admin/catalogs/search.html')
def post(self, request):
"""Redirect to the list view for the given user."""
username = request.POST.get('username')
# If no username is provided, bounce back to this page.
if not username:
return redirect(reverse('api_admin:catalog-search'))
return redirect(reverse('api_admin:catalog-list', kwargs={'username': username}))
class CatalogListView(View):
"""View to list existing catalogs and create new ones."""
template = 'api_admin/catalogs/list.html'
def _get_catalogs(self, client, username):
"""Retrieve catalogs for a user. Returns the empty list if none are found."""
try:
response = client.api.v1.catalogs.get(username=username)
return [Catalog(attributes=catalog) for catalog in response['results']]
except HttpNotFoundError:
return []
def get(self, request, username):
"""Display a list of a user's catalogs."""
client = course_discovery_api_client(request.user)
catalogs = self._get_catalogs(client, username)
return render_to_response(self.template, {
'username': username,
'catalogs': catalogs,
'form': CatalogForm(initial={'viewers': [username]}),
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
})
def post(self, request, username):
"""Create a new catalog for a user."""
form = CatalogForm(request.POST)
client = course_discovery_api_client(request.user)
if not form.is_valid():
catalogs = self._get_catalogs(client, username)
return render_to_response(self.template, {
'form': form,
'catalogs': catalogs,
'username': username,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
}, status=400)
attrs = form.instance.attributes
catalog = client.api.v1.catalogs.post(attrs)
return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']}))
class CatalogEditView(View):
"""View to edit an individual catalog."""
def get(self, request, catalog_id):
"""Display a form to edit this catalog."""
client = course_discovery_api_client(request.user)
response = client.api.v1.catalogs(catalog_id).get()
catalog = Catalog(attributes=response)
form = CatalogForm(instance=catalog)
return render_to_response('api_admin/catalogs/edit.html', {
'catalog': catalog,
'form': form,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
})
def post(self, request, catalog_id):
"""Update or delete this catalog."""
client = course_discovery_api_client(request.user)
if request.POST.get('delete-catalog') == 'on':
client.api.v1.catalogs(catalog_id).delete()
return redirect(reverse('api_admin:catalog-search'))
form = CatalogForm(request.POST)
if not form.is_valid():
response = client.api.v1.catalogs(catalog_id).get()
catalog = Catalog(attributes=response)
return render_to_response('api_admin/catalogs/edit.html', {
'catalog': catalog,
'form': form,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
}, status=400)
catalog = client.api.v1.catalogs(catalog_id).patch(form.instance.attributes)
return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']}))
class CatalogPreviewView(View):
"""Endpoint to preview courses for a query."""
def get(self, request):
"""
Return the results of a query against the course catalog API. If no
query parameter is given, returns an empty result set.
"""
client = course_discovery_api_client(request.user)
# Just pass along the request params including limit/offset pagination
if 'q' in request.GET:
results = client.api.v1.courses.get(**request.GET)
# Ensure that we don't just return all the courses if no query is given
else:
results = {'count': 0, 'results': [], 'next': None, 'prev': None}
return JsonResponse(results)
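
# Illustrative URL wiring for these views (hypothetical; the real patterns
# live in this app's urls.py):
#
#     url(r'^status/$', ApiRequestStatusView.as_view(), name='api-status'),
#     url(r'^catalogs/preview/$', CatalogPreviewView.as_view(),
#         name='catalog-preview'),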
| agpl-3.0 | -1,986,569,222,691,623,200 | 40.222222 | 98 | 0.651669 | false |
zhangsu/amphtml | validator/build.py | 6 | 22339 | #!/usr/bin/env python
#
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
from __future__ import print_function
import argparse
import glob
import logging
import os
import platform
import re
import subprocess
import sys
def Die(msg):
"""Prints error and exits with status 1.
Args:
msg: The error message to emit
"""
print(msg, file=sys.stderr)
sys.exit(1)
def EnsureNodeJsIsInstalled():
"""Ensure Node.js is installed and that 'node' is the command to run."""
logging.info('entering ...')
try:
output = subprocess.check_output(['node', '--eval', 'console.log("42")'])
if b'42' == output.strip():
return
except (subprocess.CalledProcessError, OSError):
pass
Die('Node.js not found. Try "apt-get install nodejs" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
def CheckPrereqs():
"""Checks that various prerequisites for this script are satisfied."""
logging.info('entering ...')
if platform.system() != 'Linux' and platform.system() != 'Darwin':
Die('Sorry, this script assumes Linux or Mac OS X thus far. '
'Please feel free to edit the source and fix it to your needs.')
# Ensure source files are available.
for f in [
'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',
'package.json', 'engine/validator.js', 'engine/validator_test.js',
'engine/validator-in-browser.js', 'engine/tokenize-css.js',
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js'
]:
if not os.path.exists(f):
Die('%s not found. Must run in amp_validator source directory.' % f)
# Ensure protoc is available.
try:
libprotoc_version = subprocess.check_output(['protoc', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Protobuf compiler not found. Try "apt-get install protobuf-compiler" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation.')
# Ensure 'libprotoc 2.5.0' or newer.
m = re.search(b'^(\\w+) (\\d+)\\.(\\d+)\\.(\\d+)', libprotoc_version)
if (m.group(1) != b'libprotoc' or
(int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):
Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)
# Ensure that the Python protobuf package is installed.
for m in ['descriptor', 'text_format', 'json_format']:
module = 'google.protobuf.%s' % m
try:
__import__(module)
except ImportError:
# Python3 needs pip3. Python 2 needs pip.
if sys.version_info < (3, 0):
Die('%s not found. Try "pip install protobuf" or follow the install '
'instructions at https://github.com/ampproject/amphtml/blob/master/'
'validator/README.md#installation' % module)
else:
Die('%s not found. Try "pip3 install protobuf" or follow the install '
'instructions at https://github.com/ampproject/amphtml/blob/master/'
'validator/README.md#installation' % module)
# Ensure that yarn is installed.
try:
subprocess.check_output(['yarn', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Yarn package manager not found. Run '
'"curl -o- -L https://yarnpkg.com/install.sh | bash" '
'or see https://yarnpkg.com/docs/install.')
# Ensure JVM installed. TODO: Check for version?
try:
subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
Die('Java missing. Try "apt-get install openjdk-7-jre" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
logging.info('... done')
def SetupOutDir(out_dir):
"""Sets up a clean output directory.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
if os.path.exists(out_dir):
subprocess.check_call(['rm', '-rf', out_dir])
os.mkdir(out_dir)
logging.info('... done')
def InstallNodeDependencies():
"""Installs the dependencies using yarn."""
logging.info('entering ...')
# Install the project dependencies specified in package.json into
# node_modules.
logging.info('installing AMP Validator engine dependencies ...')
subprocess.check_call(
['yarn', 'install'],
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('installing AMP Validator nodejs dependencies ...')
subprocess.check_call(
['yarn', 'install'],
cwd='nodejs',
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('... done')
def GenValidatorPb2Py(out_dir):
"""Calls the proto compiler to generate validator_pb2.py.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
subprocess.check_call(
['protoc', 'validator.proto', '--python_out=%s' % out_dir])
open('%s/__init__.py' % out_dir, 'w').close()
logging.info('... done')
def GenValidatorProtoascii(out_dir):
"""Assembles the validator protoascii file from the main and extensions.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
protoascii_segments = [open('validator-main.protoascii').read()]
extensions = glob.glob('extensions/*/validator-*.protoascii')
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not extensions:
extensions = glob.glob('../extensions/*/validator-*.protoascii')
extensions.sort()
for extension in extensions:
protoascii_segments.append(open(extension).read())
f = open('%s/validator.protoascii' % out_dir, 'w')
f.write(''.join(protoascii_segments))
f.close()
logging.info('... done')
def GenValidatorProtoGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-proto-generated.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
# These imports happen late, within this method because they don't necessarily
# exist when the module starts running, and the ones that probably do
# are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile=None,
validator_pb2=validator_pb2,
generate_proto_only=True,
generate_spec_only=False,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-proto-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def GenValidatorGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-generated.js and validator-generated.json.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
# These imports happen late, within this method because they don't necessarily
# exist when the module starts running, and the ones that probably do
# are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import json_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
generate_proto_only=False,
generate_spec_only=True,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
out = []
validator_gen_js.GenerateValidatorGeneratedJson(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
text_format=text_format,
json_format=json_format,
out=out)
out.append('')
f = open('%s/validator-generated.json' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def CompileWithClosure(js_files, definitions, entry_points, output_file):
"""Compiles the arguments with the Closure compiler for transpilation to ES5.
Args:
js_files: list of files to compile
definitions: list of definitions flags to closure compiler
entry_points: entry points (these won't be minimized)
output_file: name of the Javascript output file
"""
cmd = [
'java', '-jar', 'node_modules/google-closure-compiler-java/compiler.jar',
'--language_out=ES5_STRICT', '--dependency_mode=STRICT',
'--js_output_file=%s' % output_file
]
cmd += ['--entry_point=%s' % e for e in entry_points]
cmd += ['--output_manifest=%s' % ('%s.manifest' % output_file)]
cmd += [
'node_modules/google-closure-library/closure/**.js',
'!node_modules/google-closure-library/closure/**_test.js',
'node_modules/google-closure-library/third_party/closure/**.js',
'!node_modules/google-closure-library/third_party/closure/**_test.js'
]
cmd += js_files
cmd += definitions
subprocess.check_call(cmd)
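
# Illustrative standalone call (hypothetical entry point and output path;
# mirrors how the Compile*Minified helpers below use this function):
#
#   CompileWithClosure(
#       js_files=['engine/htmlparser.js', 'engine/htmlparser-interface.js'],
#       definitions=[],
#       entry_points=['amp.htmlparser.HtmlParser'],
#       output_file='dist/htmlparser_standalone.js')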
def CompileValidatorMinified(out_dir):
"""Generates a minified validator script, which can be imported to validate.
Args:
out_dir: output directory
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js'
],
definitions=[],
entry_points=[
'amp.validator.validateString',
'amp.validator.renderValidationResult',
'amp.validator.renderErrorMessage'
],
output_file='%s/validator_minified.js' % out_dir)
logging.info('... done')
def RunSmokeTest(out_dir):
"""Runs a smoke test (minimum valid AMP and empty html file).
Args:
out_dir: output directory
"""
logging.info('entering ...')
# Run index.js on the minimum valid amp and observe that it passes.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/minimum_valid_amp.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if (b'testdata/feature_tests/minimum_valid_amp.html: PASS\n', b'',
p.returncode) != (stdout, stderr, 0):
Die('Smoke test failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
# Run index.js on an empty file and observe that it fails.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/empty.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 1:
Die('smoke test failed. Expected p.returncode==1, saw: %s' % p.returncode)
if not stderr.startswith(b'testdata/feature_tests/empty.html:1:0 '
b'The mandatory tag \'html'):
Die('smoke test failed; stderr was: "%s"' % stderr)
logging.info('... done')
def RunIndexTest():
"""Runs the index_test.js, which tests the NodeJS API.
"""
logging.info('entering ...')
p = subprocess.Popen(
['node', './index_test.js'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd='nodejs')
(stdout, stderr) = p.communicate()
if p.returncode != 0:
Die('index_test.js failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
logging.info('... done')
def CompileValidatorTestMinified(out_dir):
"""Runs closure compiler for validator_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js', 'engine/validator_test.js'
],
definitions=[],
entry_points=['amp.validator.ValidatorTest'],
output_file='%s/validator_test_minified.js' % out_dir)
logging.info('... success')
def CompileHtmlparserTestMinified(out_dir):
"""Runs closure compiler for htmlparser_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/htmlparser.js', 'engine/htmlparser-interface.js',
'engine/htmlparser_test.js'
],
definitions=[],
entry_points=['amp.htmlparser.HtmlParserTest'],
output_file='%s/htmlparser_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseCssTestMinified(out_dir):
"""Runs closure compiler for parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-url.js',
'engine/tokenize-css.js', 'engine/css-selectors.js',
'engine/json-testutil.js', 'engine/parse-css_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.ParseCssTest'],
output_file='%s/parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseUrlTestMinified(out_dir):
"""Runs closure compiler for parse-url_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-url.js', 'engine/parse-css.js',
'engine/tokenize-css.js', 'engine/css-selectors.js',
'engine/json-testutil.js', 'engine/parse-url_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_url.ParseURLTest'],
output_file='%s/parse-url_test_minified.js' % out_dir)
logging.info('... success')
def CompileAmp4AdsParseCssTestMinified(out_dir):
"""Runs closure compiler for amp4ads-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/amp4ads-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/amp4ads-parse-css.js', 'engine/tokenize-css.js',
'engine/css-selectors.js', 'engine/json-testutil.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.Amp4AdsParseCssTest'],
output_file='%s/amp4ads-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileKeyframesParseCssTestMinified(out_dir):
"""Runs closure compiler for keyframes-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/keyframes-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/keyframes-parse-css.js', 'engine/tokenize-css.js',
'engine/css-selectors.js', 'engine/json-testutil.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.KeyframesParseCssTest'],
output_file='%s/keyframes-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseSrcsetTestMinified(out_dir):
"""Runs closure compiler for parse-srcset_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-srcset.js',
'engine/json-testutil.js', 'engine/parse-srcset_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_srcset.ParseSrcsetTest'],
output_file='%s/parse-srcset_test_minified.js' % out_dir)
logging.info('... success')
def GenerateTestRunner(out_dir):
"""Generates a test runner: a nodejs script that runs our minified tests.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
f = open('%s/test_runner' % out_dir, 'w')
extensions_dir = 'extensions'
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not os.path.isdir(extensions_dir):
extensions_dir = '../extensions'
f.write("""#!/usr/bin/env node
global.assert = require('assert');
global.fs = require('fs');
global.path = require('path');
var JasmineRunner = require('jasmine');
var jasmine = new JasmineRunner();
process.env.TESTDATA_ROOTS = 'testdata:%s'
require('./validator_test_minified');
require('./htmlparser_test_minified');
require('./parse-css_test_minified');
require('./parse-url_test_minified');
require('./amp4ads-parse-css_test_minified');
require('./keyframes-parse-css_test_minified');
require('./parse-srcset_test_minified');
jasmine.onComplete(function (passed) {
process.exit(passed ? 0 : 1);
});
jasmine.execute();
""" % extensions_dir)
os.chmod('%s/test_runner' % out_dir, 0o750)
logging.info('... success')
def RunTests(update_tests, out_dir):
"""Runs all the minified tests.
Args:
update_tests: a boolean indicating whether or not to update the test
output files.
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
env = os.environ.copy()
if update_tests:
env['UPDATE_VALIDATOR_TEST'] = '1'
subprocess.check_call(['node', '%s/test_runner' % out_dir], env=env)
logging.info('... success')
def Main(parsed_args):
"""The main method, which executes all build steps and runs the tests."""
logging.basicConfig(
format='[[%(filename)s %(funcName)s]] - %(message)s',
level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
EnsureNodeJsIsInstalled()
CheckPrereqs()
InstallNodeDependencies()
SetupOutDir(out_dir='dist')
GenValidatorProtoascii(out_dir='dist')
GenValidatorPb2Py(out_dir='dist')
GenValidatorProtoGeneratedJs(out_dir='dist')
GenValidatorGeneratedJs(out_dir='dist')
CompileValidatorMinified(out_dir='dist')
RunSmokeTest(out_dir='dist')
RunIndexTest()
CompileValidatorTestMinified(out_dir='dist')
CompileHtmlparserTestMinified(out_dir='dist')
CompileParseCssTestMinified(out_dir='dist')
CompileParseUrlTestMinified(out_dir='dist')
CompileAmp4AdsParseCssTestMinified(out_dir='dist')
CompileKeyframesParseCssTestMinified(out_dir='dist')
CompileParseSrcsetTestMinified(out_dir='dist')
GenerateTestRunner(out_dir='dist')
RunTests(update_tests=parsed_args.update_tests, out_dir='dist')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Build script for the AMP Validator.')
parser.add_argument(
'--update_tests',
action='store_true',
help=('If True, validator_test will overwrite the .out test files with '
'the encountered test output.'))
Main(parser.parse_args())
| apache-2.0 | -8,134,788,603,869,757,000 | 34.45873 | 201 | 0.655625 | false |
chfoo/fogchamp | util/csv2json.py | 1 | 4170 | '''Convert CSV files into JSON files needed for the visualizer page.'''
import argparse
import json
import os
import functools
from util.readers.addarash1 import AddarashReader
from util.readers.bulbapedia import BulbapediaReader
from util.readers.chfoo import ChfooReader
from util.readers.editornotes import EditorNotesReader
from util.readers.nkekev import NkekevReader
from util.readers.pokedex import PokedexReader
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--output-dir', default='./')
arg_parser.add_argument('--metadata-dir', default='metadata/')
args = arg_parser.parse_args()
nkekev_dir = os.path.join(args.metadata_dir, 'nkekev')
chfoo_dir = os.path.join(args.metadata_dir, 'chfoo')
addarash1_dir = os.path.join(args.metadata_dir, 'addarash1')
pokedex_dir = os.path.join(args.metadata_dir, 'pokedex', 'pokedex', 'data', 'csv')
bulbapedia_dir = os.path.join(args.metadata_dir, 'bulbapedia')
editor_notes_dir = os.path.join(args.metadata_dir, 'editor_notes')
output_dir = args.output_dir
pokedex_reader = PokedexReader(pokedex_dir)
nkekev_reader = NkekevReader(nkekev_dir)
chfoo_reader = ChfooReader(chfoo_dir)
addarash1_reader = AddarashReader(addarash1_dir)
bulbapedia_reader = BulbapediaReader(bulbapedia_dir)
editor_notes_reader = EditorNotesReader(editor_notes_dir)
# Build each Pokemon's stats
movesets_funcs = [
('pbr-2.0', functools.partial(
addarash1_reader.read_pbr_2_0, nkekev_reader, chfoo_reader)),
('pbr-gold-1.2-2015-11-07', functools.partial(
addarash1_reader.read_pbr_gold_1_2_2015_11_07,
nkekev_reader, chfoo_reader)),
('pbr-gold-1.2', functools.partial(addarash1_reader.read_pbr_gold_1_2,
nkekev_reader, chfoo_reader)),
('pbr-seel', functools.partial(chfoo_reader.read_pbr_seel, nkekev_reader)),
('pbr-platinum', nkekev_reader.read_pbr_platinum),
('pbr-gold', nkekev_reader.read_pbr_gold),
]
for move_slug, func in movesets_funcs:
pokemon_stats = {}
pokemon_slugs = []
pokemon_types = pokedex_reader.read_pokemon_types()
pokemon_weights = pokedex_reader.read_pokemon_weights()
for pokemon_stat in func():
slug = pokemon_stat.pop('slug')
pokemon_slugs.append(slug)
pokemon_stats[slug] = pokemon_stat
pokemon_stats[slug]['types'] = pokemon_types[pokemon_stat['number']]
pokemon_stats[slug]['weight'] = pokemon_weights[pokemon_stat['number']]
json_path = os.path.join(output_dir, '{}.json'.format(move_slug))
with open(json_path, 'w') as file:
file.write(json.dumps({
'stats': pokemon_stats,
'pokemon_slugs': pokemon_slugs
}, indent=2, sort_keys=True))
# Build all the moves
move_stats = {}
for move in pokedex_reader.read_moves():
slug = move.pop('slug')
move_stats[slug] = move
bulbapedia_reader.downgrade_move_changes(move_stats)
editor_notes_reader.add_move_notes(move_stats)
json_path = os.path.join(output_dir, 'moves.json')
with open(json_path, 'w') as file:
file.write(json.dumps(move_stats, indent=2, sort_keys=True))
# Build descriptions and misc
abilities = {}
for ability in pokedex_reader.read_abilities():
slug = ability.pop('slug')
abilities[slug] = ability
editor_notes_reader.add_ability_notes(abilities)
types_efficacy = pokedex_reader.read_type_efficacy()
items = {}
for item in pokedex_reader.read_items():
slug = item.pop('slug')
items[slug] = item
item_renames = bulbapedia_reader.get_item_renames_map()
json_path = os.path.join(output_dir, 'descriptions.json')
with open(json_path, 'w') as file:
file.write(json.dumps({
'abilities': abilities,
'types_efficacy': types_efficacy,
'items': items,
'item_renames': item_renames,
}, indent=2, sort_keys=True))
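
# Example invocation (hypothetical directory layout):
#   python util/csv2json.py --metadata-dir metadata/ --output-dir static/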
if __name__ == '__main__':
main()
| mit | -2,807,500,552,308,781,000 | 35.26087 | 86 | 0.642206 | false |
vpelletier/neoppod | neo/lib/event.py | 1 | 9556 | #
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, thread
from time import time
from select import epoll, EPOLLIN, EPOLLOUT, EPOLLERR, EPOLLHUP
from errno import EAGAIN, EEXIST, EINTR, ENOENT
from . import logging
from .locking import Lock
class EpollEventManager(object):
"""This class manages connections and events based on epoll(5)."""
_timeout = None
_trigger_exit = False
def __init__(self):
self.connection_dict = {}
# Initialize a dummy 'unregistered' for the very rare case a registered
        # connection is closed before the first call to poll. We don't care
        # about leaking a few integers for connections closed between 2 polls.
self.unregistered = []
self.reader_set = set()
self.writer_set = set()
self.epoll = epoll()
self._pending_processing = []
self._trigger_fd, w = os.pipe()
os.close(w)
self._trigger_lock = Lock()
def close(self):
os.close(self._trigger_fd)
for c in self.connection_dict.values():
c.close()
del self.__dict__
def getConnectionList(self):
# XXX: use index
return [x for x in self.connection_dict.itervalues()
if not x.isAborted()]
def getClientList(self):
# XXX: use index
return [c for c in self.getConnectionList() if c.isClient()]
def getServerList(self):
# XXX: use index
return [c for c in self.getConnectionList() if c.isServer()]
def getConnectionListByUUID(self, uuid):
""" Return the connection associated to the UUID, None if the UUID is
None, invalid or not found"""
# XXX: use index
# XXX: consider remove UUID from connection and thus this method
if uuid is None:
return None
result = []
append = result.append
for conn in self.getConnectionList():
if conn.getUUID() == uuid:
append(conn)
return result
# epoll_wait always waits for EPOLLERR & EPOLLHUP so we're forced
# to unregister when we want to ignore all events for a connection.
def register(self, conn, timeout_only=False):
fd = conn.getConnector().getDescriptor()
self.connection_dict[fd] = conn
if timeout_only:
self.wakeup()
else:
self.epoll.register(fd)
self.addReader(conn)
def unregister(self, conn):
new_pending_processing = [x for x in self._pending_processing
if x is not conn]
# Check that we removed at most one entry from
# self._pending_processing .
assert len(new_pending_processing) > len(self._pending_processing) - 2
self._pending_processing = new_pending_processing
fd = conn.getConnector().getDescriptor()
try:
del self.connection_dict[fd]
self.unregistered.append(fd)
self.epoll.unregister(fd)
except KeyError:
pass
except IOError, e:
if e.errno != ENOENT:
raise
else:
self.reader_set.discard(fd)
self.writer_set.discard(fd)
def isIdle(self):
return not (self._pending_processing or self.writer_set)
def _addPendingConnection(self, conn):
pending_processing = self._pending_processing
if conn not in pending_processing:
pending_processing.append(conn)
def poll(self, blocking=1):
if not self._pending_processing:
# Fetch messages from polled file descriptors
self._poll(blocking)
if not self._pending_processing:
return
to_process = self._pending_processing.pop(0)
try:
to_process.process()
finally:
# ...and requeue if there are pending messages
if to_process.hasPendingMessages():
self._addPendingConnection(to_process)
# Non-blocking call: as we handled a packet, we should just offer
# poll a chance to fetch & send already-available data, but it must
# not delay us.
self._poll(0)
def _poll(self, blocking):
if blocking:
timeout = self._timeout
timeout_object = self
for conn in self.connection_dict.itervalues():
t = conn.getTimeout()
if t and (timeout is None or t < timeout):
timeout = t
timeout_object = conn
# Make sure epoll_wait does not return too early, because it has a
# granularity of 1ms and Python 2.7 rounds the timeout towards zero.
# See also https://bugs.python.org/issue20452 (fixed in Python 3).
blocking = .001 + max(0, timeout - time()) if timeout else -1
try:
event_list = self.epoll.poll(blocking)
except IOError, exc:
if exc.errno in (0, EAGAIN):
logging.info('epoll.poll triggered undocumented error %r',
exc.errno)
elif exc.errno != EINTR:
raise
return
if event_list:
self.unregistered = unregistered = []
wlist = []
elist = []
for fd, event in event_list:
if event & EPOLLIN:
conn = self.connection_dict[fd]
if conn.readable():
self._addPendingConnection(conn)
if event & EPOLLOUT:
wlist.append(fd)
if event & (EPOLLERR | EPOLLHUP):
elist.append(fd)
for fd in wlist:
if fd not in unregistered:
self.connection_dict[fd].writable()
for fd in elist:
if fd in unregistered:
continue
try:
conn = self.connection_dict[fd]
except KeyError:
assert fd == self._trigger_fd, fd
with self._trigger_lock:
self.epoll.unregister(fd)
if self._trigger_exit:
del self._trigger_exit
thread.exit()
continue
if conn.readable():
self._addPendingConnection(conn)
elif blocking > 0:
logging.debug('timeout triggered for %r', timeout_object)
timeout_object.onTimeout()
def onTimeout(self):
on_timeout = self._on_timeout
del self._on_timeout
self._timeout = None
on_timeout()
def setTimeout(self, *args):
self._timeout, self._on_timeout = args
def wakeup(self, exit=False):
with self._trigger_lock:
self._trigger_exit |= exit
try:
self.epoll.register(self._trigger_fd)
except IOError, e:
# Ignore if 'wakeup' is called several times in a row.
if e.errno != EEXIST:
raise
def addReader(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd not in self.reader_set:
self.reader_set.add(fd)
self.epoll.modify(fd, EPOLLIN | (
fd in self.writer_set and EPOLLOUT))
def removeReader(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd in self.reader_set:
self.reader_set.remove(fd)
self.epoll.modify(fd, fd in self.writer_set and EPOLLOUT)
def addWriter(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd not in self.writer_set:
self.writer_set.add(fd)
self.epoll.modify(fd, EPOLLOUT | (
fd in self.reader_set and EPOLLIN))
def removeWriter(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd in self.writer_set:
self.writer_set.remove(fd)
self.epoll.modify(fd, fd in self.reader_set and EPOLLIN)
def log(self):
logging.info('Event Manager:')
logging.info(' Readers: %r', list(self.reader_set))
logging.info(' Writers: %r', list(self.writer_set))
logging.info(' Connections:')
pending_set = set(self._pending_processing)
for fd, conn in self.connection_dict.items():
logging.info(' %r: %r (pending=%r)', fd, conn,
conn in pending_set)
# Default to EpollEventManager.
EventManager = EpollEventManager
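# Illustrative usage sketch (not part of NEO): how a node might drive the
# manager's poll loop. `conn` stands for a NEO Connection object exposing
# getConnector(); that type lives elsewhere in the codebase, so this stays
# commented out as a hedged example rather than runnable module code.
#
#   em = EventManager()
#   em.register(conn)           # start watching the connection's fd for reads
#   while running:
#       em.poll(blocking=1)     # wait for events, then process one connection
#   em.wakeup(exit=True)        # unblock a poll() stuck inside epoll_wait
#   em.close()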
| gpl-2.0 | 4,390,888,818,905,951,000 | 36.03876 | 80 | 0.57409 | false |
kpech21/Greek-Stemmer | tests/lemmatizers/test_verb.py | 1 | 1883 | # -*- coding: utf-8 -*-
import pytest
from greek_stemmer.lemmatizers.verb import stem
class TestVerbStem:
# rule-set 1: check irregular verbs
verb_stem_ruleset1 = [
('', 'VB', ''),
('ΕΙΜΑΙ', 'VB', 'ΕΙ'),
('ΕΙΜΑΣΤΕ', 'VBS', 'ΕΙ'),
('ΠΩ', 'VB', 'Π'),
('ΖΕΙΤΕ', 'VBS', 'Ζ'),
('ΖΟΥΣΑΜΕ', 'VBDS', 'Ζ'),
('ΔΕΙ', 'VB', 'Δ')
]
@pytest.mark.parametrize('word, pos, output', verb_stem_ruleset1)
def test_verb_stem_with_ruleset1(self, word, pos, output):
assert stem(word, pos) == output
# rule-set 2: ACTIVE VOICE, Singular - PASSIVE VOICE, Singular
verb_stem_ruleset2 = [
('', 'VB', ''),
('ΠΑΙΖΕΙ', 'VB', 'ΠΑΙΖ'),
('ΤΡΟΦΟΔΟΤΟΥΜΑΙ', 'VB', 'ΤΡΟΦΟΔΟΤ'),
('ΒΙΑΖΟΣΟΥΝΑ', 'VBD', 'ΒΙΑΖ'),
('ΔΙΑΣΚΕΔΑΖΑ', 'VBD', 'ΔΙΑΣΚΕΔΑΖ'),
('ΤΡΟΦΟΔΟΤΕΙ', 'VBF', 'ΤΡΟΦΟΔΟΤ'),
('ΕΧΩ', 'MD', 'ΕΧ')
]
@pytest.mark.parametrize('word, pos, output', verb_stem_ruleset2)
def test_verb_stem_with_ruleset2(self, word, pos, output):
assert stem(word, pos) == output
# rule-set 3: ACTIVE VOICE, Plural - PASSIVE VOICE, Plural
verb_stem_ruleset3 = [
('', 'VBS', ''),
('ΑΠΟΤΕΛΕΙΣΤΕ', 'VBS', 'ΑΠΟΤΕΛ'),
('ΔΕΙΤΕ', 'VBS', 'Δ'),
('ΠΕΡΙΠΟΙΟΝΤΟΥΣΑΝ', 'VBDS', 'ΠΕΡΙΠΟ'),
('ΠΑΙΖΑΝ', 'VBDS', 'ΠΑΙΖ'),
('ΤΡΟΦΟΔΟΤΟΥΝ', 'VBFS', 'ΤΡΟΦΟΔΟΤ'),
('ΟΙΚΕΙΟΠΟΙΟΥΝΤΑΙ', 'VBS', 'ΟΙΚΕΙΟΠΟΙΟΥ')
]
@pytest.mark.parametrize('word, pos, output', verb_stem_ruleset3)
def test_verb_stem_with_various_ruleset3(self, word, pos, output):
assert stem(word, pos) == output
| lgpl-3.0 | -7,906,077,078,956,318,000 | 28.642857 | 70 | 0.533133 | false |
AndKyr/GETELEC | python/JFplot.py | 1 | 1648 | #! /usr/bin/python
import numpy as np
import getelec_mod as gt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mb
font = 30
# mb.rcParams["font.family"] = "Serif"
mb.rcParams["font.size"] = font
mb.rcParams["axes.labelsize"] = font
mb.rcParams["xtick.labelsize"] = font
mb.rcParams["ytick.labelsize"] = font
mb.rcParams["legend.fontsize"] = font
mb.rcParams["lines.linewidth"] = 2.5
fsize = (18,10)
Npoints = 256
Temps = [1.e-2, 300, 800, 1500]
Xfn = np.linspace(0.12, 0.35, Npoints)
F = 1./Xfn
Jem = np.copy(F)
this = gt.emission_create(W = 4.5, R = 5000., approx = 2)
fig1 = plt.figure(figsize=fsize)
ax1 = fig1.gca()
ax1.set_xlabel(r"$1/F$ [m GV$^{-1}$]")
ax1.set_ylabel(r"$J$ [A nm$^{-2}$]")
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i in range(len(Temps)):
this.Temp = Temps[i]
if (this.Temp < 10.):
this.approx = -1
else:
this.approx = 2
for j in range(len(F)):
this.F = F[j]
this.cur_dens()
Jem[j] = this.Jem
ax1.semilogy(Xfn,Jem, label = r'T = %d K'%this.Temp)
# for i in range(len(Temps)):
# this.Temp = Temps[i]
# if (this.Temp < 10.):
# this.approx = -1
# else:
# this.approx = -1
# for j in range(len(F)):
# this.F = F[j]
# this.cur_dens()
# Jem[j] = this.Jem
# ax1.semilogy(Xfn,Jem, '--', color = colors[i], label = r'T = %d K'%this.Temp)
# np.savetxt("J-F.dat", np.transpose(np.array([F,Jem])), delimiter = " ")
ax1.grid()
ax1.legend()
plt.savefig("JFplot_Tparam.svg")
plt.savefig("JFplot_Tparam.png")
plt.show()
| gpl-3.0 | -1,551,624,963,708,100,600 | 20.402597 | 83 | 0.586772 | false |
goyal-sidd/BLT | website/models.py | 1 | 10857 | import os
from urlparse import urlparse
import requests
import tweepy
from PIL import Image
from annoying.fields import AutoOneToOneField
from colorthief import ColorThief
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import Count
from django.db.models import signals
from django.db.models.signals import post_save
from unidecode import unidecode
class Domain(models.Model):
name = models.CharField(max_length=255, unique=True)
url = models.URLField()
logo = models.ImageField(upload_to="logos", null=True, blank=True)
webshot = models.ImageField(upload_to="webshots", null=True, blank=True)
clicks = models.IntegerField(null=True, blank=True)
email_event = models.CharField(max_length=255, default="", null=True, blank=True)
color = models.CharField(max_length=10, null=True, blank=True)
github = models.CharField(max_length=255, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
twitter = models.CharField(max_length=30, null=True, blank=True)
facebook = models.URLField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
@property
def open_issues(self):
return Issue.objects.filter(domain=self).exclude(status="closed")
@property
def closed_issues(self):
return Issue.objects.filter(domain=self).filter(status="closed")
@property
def top_tester(self):
return User.objects.filter(issue__domain=self).annotate(total=Count('issue')).order_by('-total').first()
@property
def get_name(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
def get_logo(self):
if self.logo:
return self.logo.url
image_request = requests.get("https://logo.clearbit.com/" + self.name)
try:
if image_request.status_code == 200:
image_content = ContentFile(image_request.content)
self.logo.save(self.name + ".jpg", image_content)
return self.logo.url
except:
favicon_url = self.url + '/favicon.ico'
return favicon_url
@property
def get_color(self):
if self.color:
return self.color
else:
if not self.logo:
self.get_logo()
try:
color_thief = ColorThief(self.logo)
self.color = '#%02x%02x%02x' % color_thief.get_color(quality=1)
except:
self.color = "#0000ff"
self.save()
return self.color
@property
def hostname_domain(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@property
def domain_name(self):
parsed_url = urlparse(self.url)
domain = parsed_url.hostname
temp = domain.rsplit('.')
if (len(temp) == 3):
domain = temp[1] + '.' + temp[2]
return domain
def get_absolute_url(self):
return "/domain/" + self.name
def validate_image(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = 3.0
if filesize > megabyte_limit * 1024 * 1024:
raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Issue(models.Model):
labels = (
(0, 'General'),
(1, 'Number Error'),
(2, 'Functional'),
(3, 'Performance'),
(4, 'Security'),
(5, 'Typo'),
(6, 'Design')
)
user = models.ForeignKey(User, null=True, blank=True)
domain = models.ForeignKey(Domain, null=True, blank=True)
url = models.URLField()
description = models.TextField()
label = models.PositiveSmallIntegerField(choices=labels, default=0)
views = models.IntegerField(null=True, blank=True)
status = models.CharField(max_length=10, default="open", null=True, blank=True)
user_agent = models.CharField(max_length=255, default="", null=True, blank=True)
ocr = models.TextField(default="", null=True, blank=True)
screenshot = models.ImageField(upload_to="screenshots", validators=[validate_image])
closed_by = models.ForeignKey(User, null=True, blank=True, related_name="closed_by")
closed_date = models.DateTimeField(default=None, null=True, blank=True)
github_url = models.URLField(default="", null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.description
@property
def domain_title(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
@property
def hostname_domain(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@property
def domain_name(self):
parsed_url = urlparse(self.url)
domain = parsed_url.hostname
temp = domain.rsplit('.')
if (len(temp) == 3):
domain = temp[1] + '.' + temp[2]
return domain
def get_twitter_message(self):
issue_link = " bugheist.com/issue/" + str(self.id)
prefix = "Bug found on @"
spacer = " | "
msg = prefix + self.domain_title + spacer + self.description[:140 - (
len(prefix) + len(self.domain_title) + len(spacer) + len(issue_link))] + issue_link
return msg
def get_ocr(self):
if self.ocr:
return self.ocr
else:
try:
import pytesseract
self.ocr = pytesseract.image_to_string(Image.open(self.screenshot))
self.save()
return self.ocr
except:
return "OCR not installed"
@property
def get_absolute_url(self):
return "/issue/" + str(self.id)
class Meta:
ordering = ['-created']
TWITTER_MAXLENGTH = getattr(settings, 'TWITTER_MAXLENGTH', 140)
def post_to_twitter(sender, instance, *args, **kwargs):
if not kwargs.get('created'):
return False
try:
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_key = os.environ['TWITTER_ACCESS_KEY']
access_secret = os.environ['TWITTER_ACCESS_SECRET']
except KeyError:
print 'WARNING: Twitter account not configured.'
return False
try:
text = instance.get_twitter_message()
except AttributeError:
text = unicode(instance)
mesg = u'%s' % (text)
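    # Trim the message so that, together with a trailing '...', it still fits
    # Twitter's limit (TWITTER_MAXLENGTH, 140 characters by default).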
if len(mesg) > TWITTER_MAXLENGTH:
size = len(mesg + '...') - TWITTER_MAXLENGTH
mesg = u'%s...' % (text[:-size])
import logging
logger = logging.getLogger('testlogger')
if not settings.DEBUG:
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
file = default_storage.open(instance.screenshot.file.name, 'rb')
media_ids = api.media_upload(filename=unidecode(instance.screenshot.file.name), file=file)
params = dict(status=mesg, media_ids=[media_ids.media_id_string])
api.update_status(**params)
except Exception, ex:
print 'ERROR:', str(ex)
logger.debug('rem %s' % str(ex))
return False
signals.post_save.connect(post_to_twitter, sender=Issue)
class Hunt(models.Model):
user = models.ForeignKey(User)
url = models.URLField()
prize = models.IntegerField()
logo = models.ImageField(upload_to="logos", null=True, blank=True)
plan = models.CharField(max_length=10)
txn_id = models.CharField(max_length=50, null=True, blank=True)
color = models.CharField(max_length=10, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@property
def domain_title(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
class Meta:
ordering = ['-id']
class Points(models.Model):
user = models.ForeignKey(User)
issue = models.ForeignKey(Issue, null=True, blank=True)
domain = models.ForeignKey(Domain, null=True, blank=True)
score = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# @receiver(user_logged_in, dispatch_uid="some.unique.string.id.for.allauth.user_logged_in")
# def user_logged_in_(request, user, **kwargs):
# if not settings.TESTING:
# action.send(user, verb='logged in')
class InviteFriend(models.Model):
sender = models.ForeignKey(User)
recipient = models.EmailField()
sent = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
ordering = ('-sent',)
verbose_name = 'invitation'
verbose_name_plural = 'invitations'
def user_images_path(instance, filename):
from django.template.defaultfilters import slugify
filename, ext = os.path.splitext(filename)
return 'avatars/user_{0}/{1}{2}'.format(instance.user.id, slugify(filename), ext)
class UserProfile(models.Model):
title = (
(0, 'Unrated'),
(1, 'Bronze'),
(2, 'Silver'),
(3, 'Gold'),
(4, 'Platinum'),
)
follows = models.ManyToManyField('self', related_name='follower', symmetrical=False, blank=True)
user = AutoOneToOneField('auth.user', related_name="userprofile")
user_avatar = models.ImageField(upload_to=user_images_path, blank=True, null=True)
title = models.IntegerField(choices=title, default=0)
winnings = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
issue_upvoted = models.ManyToManyField(Issue, blank=True, related_name="upvoted")
issue_saved = models.ManyToManyField(Issue, blank=True, related_name="saved")
def avatar(self, size=36):
if self.user_avatar:
return self.user_avatar.url
for account in self.user.socialaccount_set.all():
if 'avatar_url' in account.extra_data:
return account.extra_data['avatar_url']
elif 'picture' in account.extra_data:
return account.extra_data['picture']
def __unicode__(self):
return self.user.email
def create_profile(sender, **kwargs):
user = kwargs["instance"]
if kwargs["created"]:
profile = UserProfile(user=user)
profile.save()
post_save.connect(create_profile, sender=User)
| agpl-3.0 | 2,311,262,947,357,958,700 | 32.717391 | 112 | 0.634153 | false |
mark-r-g/hydrus | tests/test_rapidclus.py | 1 | 1819 | # Mark Gatheman <[email protected]>
#
# This file is part of Hydrus.
#
# Hydrus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hydrus. If not, see <http://www.gnu.org/licenses/>.
import random
from collections import Counter
import numpy as np
from hydrus.rapidclus import close_outer, close_inner, choose_initial_seeds
from hydrus.rapidclus import rapidclus
def test_close_inner():
assert close_inner([1, 3, 6, 10, 11]) == (3, 4, 10, 11, 1)
assert close_inner(range(1, 100, 5)) == (0, 1, 1, 6, 5)
def test_close_outer():
assert close_outer([1, 3, 6, 10, 11], 7) == (2, 6, 1)
assert close_outer([1, 3, 6, 10, 11], 0) == (0, 1, 1)
assert close_outer([1, 3, 6, 10, 11], 111) == (4, 11, 100)
def test_choose_initial_seeds():
assert choose_initial_seeds([1, 3, 6, 10, 11, 100], 3) == [1, 11, 100]
assert choose_initial_seeds([1, 3, 6, 10, 11, 100], 5) == [1, 3, 6, 11, 100]
random.seed(36261837)
data = [int(random.gauss(0,1000)) for _ in range(100)]
assert choose_initial_seeds(data, 5) == [-2376, -862, 521, 1948, 3239]
def test_rapidclus():
random.seed(12521184)
data = [random.gauss(0,1) for _ in range(1000)]
assert sorted(Counter(rapidclus(data)).values()) == [34, 41, 233, 251, 441]
assert rapidclus(data) == rapidclus(np.array(data))
| gpl-3.0 | -8,838,705,027,626,463,000 | 35.38 | 80 | 0.671798 | false |
googleapis/googleapis-gen | google/cloud/secrets/v1beta1/secretmanager-v1beta1-py/google/cloud/secretmanager/__init__.py | 1 | 3026 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.secretmanager_v1beta1.services.secret_manager_service.client import SecretManagerServiceClient
from google.cloud.secretmanager_v1beta1.services.secret_manager_service.async_client import SecretManagerServiceAsyncClient
from google.cloud.secretmanager_v1beta1.types.resources import Replication
from google.cloud.secretmanager_v1beta1.types.resources import Secret
from google.cloud.secretmanager_v1beta1.types.resources import SecretPayload
from google.cloud.secretmanager_v1beta1.types.resources import SecretVersion
from google.cloud.secretmanager_v1beta1.types.service import AccessSecretVersionRequest
from google.cloud.secretmanager_v1beta1.types.service import AccessSecretVersionResponse
from google.cloud.secretmanager_v1beta1.types.service import AddSecretVersionRequest
from google.cloud.secretmanager_v1beta1.types.service import CreateSecretRequest
from google.cloud.secretmanager_v1beta1.types.service import DeleteSecretRequest
from google.cloud.secretmanager_v1beta1.types.service import DestroySecretVersionRequest
from google.cloud.secretmanager_v1beta1.types.service import DisableSecretVersionRequest
from google.cloud.secretmanager_v1beta1.types.service import EnableSecretVersionRequest
from google.cloud.secretmanager_v1beta1.types.service import GetSecretRequest
from google.cloud.secretmanager_v1beta1.types.service import GetSecretVersionRequest
from google.cloud.secretmanager_v1beta1.types.service import ListSecretsRequest
from google.cloud.secretmanager_v1beta1.types.service import ListSecretsResponse
from google.cloud.secretmanager_v1beta1.types.service import ListSecretVersionsRequest
from google.cloud.secretmanager_v1beta1.types.service import ListSecretVersionsResponse
from google.cloud.secretmanager_v1beta1.types.service import UpdateSecretRequest
__all__ = ('SecretManagerServiceClient',
'SecretManagerServiceAsyncClient',
'Replication',
'Secret',
'SecretPayload',
'SecretVersion',
'AccessSecretVersionRequest',
'AccessSecretVersionResponse',
'AddSecretVersionRequest',
'CreateSecretRequest',
'DeleteSecretRequest',
'DestroySecretVersionRequest',
'DisableSecretVersionRequest',
'EnableSecretVersionRequest',
'GetSecretRequest',
'GetSecretVersionRequest',
'ListSecretsRequest',
'ListSecretsResponse',
'ListSecretVersionsRequest',
'ListSecretVersionsResponse',
'UpdateSecretRequest',
)
| apache-2.0 | -1,846,891,807,528,377,000 | 48.606557 | 123 | 0.826504 | false |
labase/surdonews | src/surdonews/leao/main.py | 1 | 5477 | from jqueryui import jq
from browser import document, html
from superpython.virgem.main import Sala, Labirinto, Cena, INVENTARIO  # importing from virgem
STYLE = dict(position="absolute", width=300, left=0, top=0, background="blue")  # change the background color further down
STYLE["min-height"] = "300px"
IMAGEM = "http://s16.postimg.org/k81hwi2n9/Desert.jpg"
class Leao:
SETOR = None
def __init__(self):
pass
def monta(self):
NONE = [None] * 4
imn = "https://upload.wikimedia.org/wikipedia/commons/1/1e/Est%C3%BAdio_-_TV_Cultura_Montenegro.jpg"
iml = "http://mochilaotrips.com/wp-content/uploads/2013/03/IMG_1447.jpg"
ims = "https://upload.wikimedia.org/wikipedia/commons/0/01/Morro_de_Castelo_Branco,_aspectos_1,_Castelo_Branco,_concelho_da_Horta,_ilha_do_Faial,_A%C3%A7ores,_Portugal.JPG"
imo = "http://www.unicos.cc/wp-content/uploads/2014/12/jornalismo-1-951x476.jpg"
irl = "http://www.vipcomm.com.br/site/upload/sbHulk_GN_150614026.jpg"
iro = "https://blogpontodeonibus.files.wordpress.com/2013/02/photodownload-php.jpg"
iro = "http://imagens.canaltech.com.br/38560.54878-Tirar-fotos.jpg"
irn = "http://7diasverdes.com.br/wp-content/uploads/2013/07/Bicicleta-de-passeio.jpg"
irs = "http://www.boulevardshopping.com.br/novo/wp-content/uploads/2012/02/Mcdonalds.jpg"
isn = "http://www.comercialvidoto.com.br/site/wgc_media/photos/Banco-pe-de-Ferro-Tamandua.png"
isl = "http://andif.com.br/imagens/noticias/Banco_Santander_mjg.jpg"
iso = "http://imguol.com/2013/01/08/fiat-mille-economy-1357657820399_956x500.jpg"
iss = "http://images.forwallpaper.com/files/images/a/a809/a809de18/32241/notepad.jpg"
desk = "https://blogpontodeonibus.files.wordpress.com/2012/07/expresso_brasileirold_chassiscania_1.jpg"
drawer = "http://s.glbimg.com/og/rg/f/original/2010/07/09/tiago606.jpg"
imageM = ""
        sala_norte = Sala([isn, desk, iss, iso], NONE)  # sea
        sala_leste = Sala([isn, isl, iss, iso], NONE)  # sea
        sala_sul = Sala([irn, irl, irs, iro], NONE)  # desert
        sala_oeste = Sala([isn, isl, iss, iso], NONE)  # sea
salas = [sala_norte.norte, sala_leste.leste, sala_sul.sul, sala_oeste.oeste]
sala_centro = Sala([imn, iml, ims, imo], salas)
labirinto = Leao.SETOR = Labirinto([
sala_centro, sala_norte, sala_leste, sala_sul, sala_oeste])
labirinto.norte.leste.meio = Cena(img=imageM)
        labirinto.sul.sul.meio = Cena(vai=self.help)  # changed
        labirinto.leste.sul.meio = Cena(vai=self.pega_invent)  # changed
        # Fix (assumption): the original line rebound `labirinto` to a bare
        # Cena, which broke labirinto.centro.norte.vai() in vai(); attaching
        # the objective scene to the centre room keeps the maze object intact.
        labirinto.centro.meio = Cena(vai=self.objetivo)  # changed
        return labirinto
def nao_monta(self):
pass
def vai(self):
labirinto = self.monta()
self.monta = self.nao_monta
labirinto.centro.norte.vai()
return labirinto
"""def pega_card(self):
riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" #link da imagem
flag = None
def clicou(_):
#hipótese de flag
input("Você não está num meio de transporte.")
if not "card" in INVENTARIO.inventario: #Se o Rio Card não estiver no inventário significa que ele pegou
input("Você pegou o RioCard.")
INVENTARIO.bota("card", riocard, clicou)
else:
input("Atenção: o inventário está vazio!")"""
def pega_invent(self):
riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" # link da imagem
flag = None
def clicou(_):
# hipótese de flag
input("Você não está num meio de transporte.")
if not "card" in INVENTARIO.inventario: # Se o Rio Card não estiver no inventário significa que ele pegou
input("Você pegou o RioCard.")
INVENTARIO.bota("card", riocard, clicou)
else:
input("Atenção: o inventário está vazio!")
def help(self):
ajuda = "http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/256/Actions-help-hint-icon.png"
flag = None
def clicou(_):
            # in case the flag fires
input("Você precisa ir na sala à leste do atendimento.")
if not "ajuda" in INVENTARIO.inventario:
input("Você quer saber sobre o meu relátorio sobre a gripe? Ele na escrivaninha na sala lesta à recepção.")
INVENTARIO.bota("ajuda", ajuda, clicou)
else:
input("Achou o relatorio? Procurou na sala certa?")
"""
def objetivo(self):
ajuda = "http://www.iconsdownload.net/icons/256/11335-target-icon.png"
flag = None
def clicou(_):
input("Objetivo do programa: Você é um repórter e precisa achar o relatório com o resumo de todas as matérias que você vai conquistar nos diversos lugares do labirinto.")
"""
INSTANCIA = None
def leao():
def cria_leao():
global INSTANCIA
INSTANCIA = Leao()
if not INSTANCIA:
cria_leao()
return INSTANCIA
if __name__ == "__main__":
change_bg = "Para qual cor você quer mudar o plano de fundo? azul/branco"
escolha = input(change_bg)
if escolha == "azul":
background = "blue"
lab = leao()
print(INSTANCIA)
INVENTARIO.inicia()
lab.vai()
# lab.centro.norte.vai()
# lab.sul.oeste.meio = metro.centro.norte
| gpl-3.0 | -6,861,895,350,810,168,000 | 39.288889 | 182 | 0.633388 | false |
levilucio/SyVOLT | GM2AUTOSAR_MM/transformation/HMapPartition.py | 1 | 3685 | from core.himesis import Himesis
import uuid
class HMapPartition(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule MapPartition.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMapPartition, self).__init__(name='HMapPartition', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """MapPartition"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MapPartition')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Partition() node
self.add_node()
self.vs[3]["mm__"] = """Partition"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Partition()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class PhysicalNode() node
self.add_node()
self.vs[5]["mm__"] = """PhysicalNode"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class PhysicalNode()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class Module() node
self.add_node()
self.vs[7]["mm__"] = """Module"""
self.vs[7]["attr1"] = """1"""
# match_contains node for class Module()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# apply class SwcToEcuMapping() node
self.add_node()
self.vs[9]["mm__"] = """SwcToEcuMapping"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class SwcToEcuMapping()
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[11]["attr1"] = """partition"""
self.vs[11]["mm__"] = """directLink_S"""
# match association Partition--module-->Module node
self.add_node()
self.vs[12]["attr1"] = """module"""
self.vs[12]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Partition()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class PhysicalNode()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class Module()
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class SwcToEcuMapping()
(5,11), # match_class PhysicalNode() -> association partition
(11,3), # association partition -> match_class Partition()
(3,12), # match_class Partition() -> association module
(12,7), # association module -> match_class Module()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((9,'shortName'),('concat',(('constant','Swc2EcuMapping_'),(3,'name')))), ]
| mit | 4,636,470,346,832,838,000 | 32.5 | 104 | 0.493623 | false |
benpetty/Code-Katas | katas/sort_cards/sort_cards.py | 1 | 1347 | """Sort Cards.
https://www.codewars.com/kata/56f399b59821793533000683
Write a function sort_cards() that sorts a shuffled list of cards,
so that any given list of cards is sorted by rank,
no matter the starting collection.
All cards in the list are represented as strings,
so that sorted list of cards looks like this:
['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
Example:
>>> sort_cards(
['3', '9', 'A', '5', 'T', '8', '2', '4', 'Q', '7', 'J', '6', 'K']
['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
Hint: Tests will have many occurrences of same rank cards,
as well as vary in length. You can assume though,
that input list is always going to have at least 1 element.
"""
def sort_cards(cards):
"""Input a list of strings representing cards and return them sorted."""
rank = {
"A": 0,
"2": 1,
"3": 2,
"4": 3,
"5": 4,
"6": 5,
"7": 6,
"8": 7,
"9": 8,
"T": 9,
"J": 10,
"Q": 11,
"K": 12,
}
ranked = []
for card in cards:
card = str(card).upper()
if card in rank:
card = (rank[card], card)
ranked.append(card)
ranked = sorted(ranked)
result = []
for card in ranked:
result.append(card[1])
return result
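if __name__ == "__main__":  # quick illustrative check, not part of the kata
    # The rank dict above maps each face to its sort position, so a mixed
    # hand comes back in A..K order.
    assert sort_cards(['K', 'A', '2', 'T']) == ['A', '2', 'T', 'K']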
| mit | 3,330,309,443,274,831,400 | 23.944444 | 76 | 0.513734 | false |
karlnapf/kameleon-mcmc | kameleon_mcmc/tools/Visualise.py | 1 | 5656 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from kameleon_mcmc.distribution.Gaussian import Gaussian
from matplotlib.patches import Ellipse
from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca
from numpy import linspace
from numpy.linalg.linalg import eigh
from numpy import zeros, array, exp, arctan2, sqrt
import numpy
class Visualise(object):
def __init__(self):
pass
@staticmethod
def get_plotting_arrays(distribution):
bounds = distribution.get_plotting_bounds()
assert(len(bounds) == 2)
Xs = linspace(bounds[0][0], bounds[0][1])
Ys = linspace(bounds[1][0], bounds[1][1])
return Xs, Ys
@staticmethod
def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None):
"""
Plots the density of a given Distribution instance and plots some
samples on top.
"""
if Xs is None or Ys is None:
Xs, Ys = Visualise.get_plotting_arrays(distribution)
Visualise.plot_density(distribution, Xs, Ys)
if Z is not None:
hold(True)
Visualise.plot_data(Z)
hold(False)
@staticmethod
def plot_density(distribution, Xs, Ys, log_domain=False):
"""
Plots a 2D density
        density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
assert(distribution.dimension == 2)
D = zeros((len(Xs), len(Ys)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False):
"""
Contour-plots a 2D density. If Gaussian, plots 1.96 interval contour only
density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
if isinstance(distribution, Gaussian) and log_domain == False:
gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution))
gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \
markersize=3.0, markeredgewidth=.1)
return
assert(distribution.dimension == 2)
if Xs is None:
(xmin, xmax), _ = distribution.get_plotting_bounds()
Xs = linspace(xmin, xmax)
if Ys is None:
_, (ymin, ymax) = distribution.get_plotting_bounds()
Ys = linspace(ymin, ymax)
D = zeros((len(Ys), len(Xs)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
contour(Xs, Ys, D, origin='lower')
@staticmethod
def plot_array(Xs, Ys, D):
"""
Plots a 2D array
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
D - array to plot
"""
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def plot_data(Z, y=None):
"""
Plots collection of 2D points and optionally adds a marker to one of them
Z - set of row-vectors points to plot
y - one point that is marked in red, might be None
"""
plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1)
if y is not None:
plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1)
@staticmethod
def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1):
"""
        Returns an ellipse artist for nstd times the standard deviation of this
Gaussian
"""
assert(isinstance(gaussian, Gaussian))
assert(gaussian.dimension == 2)
# compute eigenvalues (ordered)
vals, vecs = eigh(gaussian.L.dot(gaussian.L.T))
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
# width and height are "full" widths, not radius
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \
edgecolor="red", fill=False, linewidth=linewidth)
return e
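# Illustrative usage sketch; Gaussian's constructor signature is an assumption
# here (only .mu and .L are used above), so this stays commented out:
#
#   gaussian = Gaussian(mu=array([0., 0.]))   # hypothetical default covariance
#   Z = gaussian.sample(200)                  # assumed sampling API
#   Visualise.visualise_distribution(gaussian, Z)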
| bsd-2-clause | 31,964,555,859,623,480 | 33.072289 | 90 | 0.563296 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/add_newdocs.py | 1 | 44163 | from lib import add_newdoc
add_newdoc('numpy.core','dtype',
[('fields', "Fields of the data-type or None if no fields"),
('names', "Names of fields or None if no fields"),
('alignment', "Needed alignment for this data-type"),
('byteorder',
"Little-endian (<), big-endian (>), native (=), or "\
"not-applicable (|)"),
('char', "Letter typecode for this data-type"),
('type', "Type object associated with this data-type"),
('kind', "Character giving type-family of this data-type"),
('itemsize', "Size of each item"),
('hasobject', "Non-zero if Python objects are in "\
"this data-type"),
('num', "Internally-used number for builtin base"),
('newbyteorder',
"""self.newbyteorder(<endian>)
returns a copy of the dtype object with altered byteorders.
If <endian> is not given all byteorders are swapped.
Otherwise endian can be '>', '<', or '=' to force a particular
byteorder. Data-types in all fields are also updated in the
new dtype object.
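        Example (illustrative):

        >>> import numpy
        >>> numpy.dtype('<i4').newbyteorder('>') == numpy.dtype('>i4')
        True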
"""),
("__reduce__", "self.__reduce__() for pickling"),
("__setstate__", "self.__setstate__() for pickling"),
("subdtype", "A tuple of (descr, shape) or None"),
("descr", "The array_interface data-type descriptor."),
("str", "The array interface typestring."),
("name", "The name of the true data-type"),
("base", "The base data-type or self if no subdtype"),
("shape", "The shape of the subdtype or (1,)"),
("isbuiltin", "Is this a built-in data-type?"),
("isnative", "Is the byte-order of this data-type native?")
]
)
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
# attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""documentation needed
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""An N-d tuple of current coordinates.
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""documentation needed
"""))
# functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""copy() Get a copy of the iterator as a 1-d array
"""))
###############################################################################
#
# broadcast
#
###############################################################################
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""current index in broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""tuple of individual iterators
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""number of dimensions of broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""number of iterators
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""shape of broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""total size of broadcasted result
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray','array',
"""array(object, dtype=None, copy=1,order=None, subok=0,ndmin=0)
Return an array from object with the specified date-type.
Inputs:
object - an array, any object exposing the array interface, any
object whose __array__ method returns an array, or any
(nested) sequence.
dtype - The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy - If true, then force a copy. Otherwise a copy will only occur
if __array__ returns a copy, obj is a nested sequence, or
a copy is needed to satisfy any of the other requirements
order - Specify the order of the array. If order is 'C', then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'FORTRAN', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is None, then the returned array may
be in either C-, or Fortran-contiguous order or even
discontiguous.
subok - If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array
ndmin - Specifies the minimum number of dimensions that the resulting
array should have. 1's will be pre-pended to the shape as
needed to meet this requirement.
""")
add_newdoc('numpy.core.multiarray','empty',
"""empty((d1,...,dn),dtype=float,order='C')
Return a new array of shape (d1,...,dn) and given type with all its
entries uninitialized. This can be faster than zeros.
""")
add_newdoc('numpy.core.multiarray','scalar',
"""scalar(dtype,obj)
Return a new scalar array of the given type initialized with
obj. Mainly for pickle support. The dtype must be a valid data-type
descriptor. If dtype corresponds to an OBJECT descriptor, then obj
can be any object, otherwise obj must be a string. If obj is not given
it will be interpreted as None for object type and zeros for all other
types.
""")
add_newdoc('numpy.core.multiarray','zeros',
"""zeros((d1,...,dn),dtype=float,order='C')
    Return a new array of shape (d1,...,dn) and the given dtype with all
    its entries initialized to zero.
""")
add_newdoc('numpy.core.multiarray','set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray','fromstring',
"""fromstring(string, dtype=float, count=-1, sep='')
Return a new 1d array initialized from the raw binary data in string.
If count is positive, the new array will have count elements, otherwise its
size is determined by the size of string. If sep is not empty then the
string is interpreted in ASCII mode and converted to the desired number type
using sep as the separator between elements (extra whitespace is ignored).
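
    Example (illustrative):

    >>> from numpy import fromstring
    >>> fromstring('1 2 3', dtype=int, sep=' ')
    array([1, 2, 3])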
""")
add_newdoc('numpy.core.multiarray','fromiter',
"""fromiter(iterable, dtype, count=-1)
    Return a new 1d array initialized from iterable. If count is
    nonnegative, the new array will have count elements, otherwise its
    size is determined by the iterable.
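
    Example (illustrative):

    >>> from numpy import fromiter
    >>> fromiter((x*x for x in range(5)), dtype=float)
    array([  0.,   1.,   4.,   9.,  16.])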
""")
add_newdoc('numpy.core.multiarray','fromfile',
"""fromfile(file=, dtype=float, count=-1, sep='') -> array.
Required arguments:
file -- open file object or string containing file name.
Keyword arguments:
dtype -- type and order of the returned array (default float)
count -- number of items to input (default all)
      sep -- separator between items if file is a text file (default "")
Return an array of the given data type from a text or binary file. The
'file' argument can be an open file or a string with the name of a file to
read from. If 'count' == -1 the entire file is read, otherwise count is the
number of items of the given type to read in. If 'sep' is "" it means to
read binary data from the file using the specified dtype, otherwise it gives
the separator between elements in a text file. The 'dtype' value is also
used to determine the size and order of the items in binary files.
Data written using the tofile() method can be conveniently recovered using
this function.
WARNING: This function should be used sparingly as the binary files are not
    platform independent. In particular, they contain no endianness or datatype
information. Nevertheless it can be useful for reading in simply formatted
or binary data quickly.
""")
add_newdoc('numpy.core.multiarray','frombuffer',
"""frombuffer(buffer=, dtype=float, count=-1, offset=0)
Returns a 1-d array of data type dtype from buffer. The buffer
argument must be an object that exposes the buffer interface. If
count is -1 then the entire buffer is used, otherwise, count is the
size of the output. If offset is given then jump that far into the
    buffer. If the buffer has data that is not in machine byte-order,
    then use a proper data-type descriptor. The data will not be
byteswapped, but the array will manage it in future operations.
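
    Example (illustrative):

    >>> from numpy import frombuffer, uint8
    >>> frombuffer('abc', dtype=uint8)
    array([97, 98, 99], dtype=uint8)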
""")
add_newdoc('numpy.core.multiarray','concatenate',
"""concatenate((a1, a2, ...), axis=0)
Join arrays together.
The tuple of sequences (a1, a2, ...) are joined along the given axis
(default is the first one) into a single numpy array.
Example:
>>> concatenate( ([0,1,2], [5,6,7]) )
array([0, 1, 2, 5, 6, 7])
""")
add_newdoc('numpy.core.multiarray','inner',
"""inner(a,b)
    Returns the inner product of two arrays, which has shape a.shape[:-1] +
    b.shape[:-1], with elements computed as the sums of products of the
    elements from the last dimensions of a and b.
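
    Example (illustrative):

    >>> inner([1, 2, 3], [0, 1, 0])
    2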
""")
add_newdoc('numpy.core','fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray','correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray','arange',
"""arange([start,] stop[, step,], dtype=None)
For integer arguments, just like range() except it returns an array
whose type can be specified by the keyword argument dtype. If dtype
is not specified, the type of the result is deduced from the type of
the arguments.
For floating point arguments, the length of the result is ceil((stop -
start)/step). This rule may result in the last element of the result
being greater than stop.
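
    Example (illustrative):

    >>> arange(3)
    array([0, 1, 2])
    >>> arange(3.0)
    array([ 0.,  1.,  2.])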
""")
add_newdoc('numpy.core.multiarray','_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray','_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray','set_string_function',
"""set_string_function(f, repr=1)
Set the python function f to be the function used to obtain a pretty
printable string version of an array whenever an array is printed.
f(M) should expect an array argument M, and should return a string
consisting of the desired representation of M for printing.
""")
add_newdoc('numpy.core.multiarray','set_numeric_ops',
"""set_numeric_ops(op=func, ...)
Set some or all of the number methods for all array objects. Do not
forget **dict can be used as the argument list. Return the functions
that were replaced, which can be stored and set later.
""")
add_newdoc('numpy.core.multiarray','where',
"""where(condition, x, y) or where(condition)
Return elements from `x` or `y`, depending on `condition`.
*Parameters*:
condition : array of bool
When True, yield x, otherwise yield y.
x,y : 1-dimensional arrays
Values from which to choose.
*Notes*
This is equivalent to
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
The result is shaped like `condition` and has elements of `x`
or `y` where `condition` is respectively True or False.
In the special case, where only `condition` is given, the
tuple condition.nonzero() is returned, instead.
*Examples*
>>> where([True,False,True],[1,2,3],[4,5,6])
array([1, 5, 3])
""")
add_newdoc('numpy.core.multiarray','lexsort',
"""lexsort(keys=, axis=-1) -> array of indices. Argsort with list of keys.
Perform an indirect sort using a list of keys. The first key is sorted,
then the second, and so on through the list of keys. At each step the
previous order is preserved when equal keys are encountered. The result is
a sort on multiple keys. If the keys represented columns of a spreadsheet,
for example, this would sort using multiple columns (the last key being
used for the primary sort order, the second-to-last key for the secondary
sort order, and so on). The keys argument must be a sequence of things
that can be converted to arrays of the same shape.
Parameters:
        keys : sequence of array-like
            Arrays containing the keys to sort by; the last key in the
            sequence is the primary sort key.
axis : integer
Axis to be indirectly sorted. None indicates that the flattened
array should be used. Default is -1.
Returns:
indices : integer array
Array of indices that sort the keys along the specified axis. The
array has the same shape as the keys.
SeeAlso:
argsort : indirect sort
sort : inplace sort
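
    Example (illustrative):

        >>> a = [1, 5, 1, 4, 3, 4, 4]  # primary key: last in the keys tuple
        >>> b = [9, 4, 0, 4, 0, 2, 1]  # secondary key
        >>> ind = lexsort((b, a))
        >>> [(a[i], b[i]) for i in ind]
        [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]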
""")
add_newdoc('numpy.core.multiarray','can_cast',
"""can_cast(from=d1, to=d2)
Returns True if data type d1 can be cast to data type d2 without
losing precision.
""")
add_newdoc('numpy.core.multiarray','newbuffer',
"""newbuffer(size)
Return a new uninitialized buffer object of size bytes
""")
add_newdoc('numpy.core.multiarray','getbuffer',
"""getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset. Default is the entire buffer. A
read-write buffer is attempted followed by a read-only buffer.
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type-descriptor object
details the data-type in an array (including byteorder and any
fields). An array can be constructed using the numpy.array
command. Arrays are sequence, mapping and numeric objects.
More information is available in the numpy module and by looking
at the methods and attributes of an array.
ndarray.__new__(subtype, shape=, dtype=float, buffer=None,
offset=0, strides=None, order=None)
There are two modes of creating an array using __new__:
1) If buffer is None, then only shape, dtype, and order
are used
2) If buffer is an object exporting the buffer interface, then
all keywords are interpreted.
The dtype parameter can be any object that can be interpreted
as a numpy.dtype object.
No __init__ method is needed because the array is fully
initialized after the __new__ method.
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""Base object if memory is from some other object.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""A ctypes interface object.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Buffer object pointing to the start of the data.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""Data-type for the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""Imaginary part of the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""Length of one element in bytes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""Special object providing array flags.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""A 1-d flat iterator.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""Number of bytes in the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""Number of array dimensions.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""Real part of the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""Tuple of array dimensions.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""Number of elements in the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""Tuple of bytes to step in each dimension.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""Same as self.transpose() except self is returned for self.ndim < 2.
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as a from ndarray obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__(|order) -> copy, possibly with different order.
Return a copy of the array.
Argument:
order -- Order of returned copy (default 'C')
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
          only if m is already in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, typecode, isfortran, rawdata)
For unpickling.
Arguments:
version -- optional pickle version. If omitted defaults to 0.
shape -- a tuple giving the shape
typecode -- a typecode
      isFortran -- a bool stating if the data is in Fortran order
rawdata -- a binary string with the data (or a list if Object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
""" a.all(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
""" a.any(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
""" a.argmax(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
""" a.argmin(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""a.argsort(axis=-1, kind='quicksort', order=None) -> indices
Perform an indirect sort along the given axis using the algorithm specified
by the kind keyword. It returns an array of indices of the same shape as
'a' that index data along the given axis in sorted order.
:Parameters:
axis : integer
Axis to be indirectly sorted. None indicates that the flattened
array should be used. Default is -1.
kind : string
Sorting algorithm to use. Possible values are 'quicksort',
'mergesort', or 'heapsort'. Default is 'quicksort'.
order : list type or None
When a is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
:Returns:
indices : integer array
Array of indices that sort 'a' along the specified axis.
:SeeAlso:
- lexsort : indirect stable sort with multiple keys
- sort : inplace sort
:Notes:
------
The various sorts are characterized by average speed, worst case
performance, need for work space, and whether they are stable. A stable
sort keeps items with the same key in the same relative order. The three
available algorithms have the following properties:
|------------------------------------------------------|
| kind | speed | worst case | work space | stable|
|------------------------------------------------------|
|'quicksort'| 1 | O(n^2) | 0 | no |
|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes |
|'heapsort' | 3 | O(n*log(n)) | 0 | no |
|------------------------------------------------------|
All the sort algorithms make temporary copies of the data when the sort is not
along the last axis. Consequently, sorts along the last axis are faster and use
    less space than sorts along other axes.
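
    Example (illustrative):

    >>> a = array([3, 1, 2])
    >>> a.argsort()
    array([1, 2, 0])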
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""a.astype(t) -> Copy of array cast to type t.
    Cast array a to type t. t can be either a string representing a typecode,
or a python type object of type int, float, or complex.
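
    Example (illustrative):

    >>> array([1, 2, 3]).astype(float)
    array([ 1.,  2.,  3.])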
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""a.byteswap(False) -> View or copy. Swap the bytes in the array.
Swap the bytes in the array. Return the byteswapped array. If the first
argument is True, byteswap in-place and return a reference to self.
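
    Example (illustrative):

    >>> a = array([1, 256], dtype='<u2')
    >>> int(a.byteswap()[0])
    256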
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
""" a.choose(b0, b1, ..., bn, out=None, mode='raise')
    Return an array that merges the b_i arrays together using 'a' as
    the index. The b_i arrays and 'a' must all be broadcastable to the
    same shape. The output at a particular position is the input
    array b_i at that position depending on the value of 'a' at that
    position. Therefore, 'a' must be an integer array with entries
    from 0 to n.
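
    Example (illustrative):

    >>> a = array([0, 1, 0, 1])
    >>> a.choose(array([10, 20, 30, 40]), array([50, 60, 70, 80]))
    array([10, 60, 30, 80])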
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""a.clip(min=, max=, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""a.compress(condition=, axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""a.conj()
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""a.conjugate()
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""a.copy(|order) -> copy, possibly with different order.
Return a copy of the array.
Argument:
order -- Order of returned copy (default 'C')
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
          only if m is already in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""a.cumprod(axis=None, dtype=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""a.cumsum(axis=None, dtype=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals
If a is 2-d, return the diagonal of self with the given offset, i.e., the
collection of elements of the form a[i,i+offset]. If a is n-d with n > 2,
then the axes specified by axis1 and axis2 are used to determine the 2-d
subarray whose diagonal is returned. The shape of the resulting array can
be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
:Parameters:
offset : integer
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to main diagonal.
axis1 : integer
Axis to be used as the first axis of the 2-d subarrays from which
the diagonals should be taken. Defaults to first index.
axis2 : integer
Axis to be used as the second axis of the 2-d subarrays from which
the diagonals should be taken. Defaults to second index.
:Returns:
array_of_diagonals : same type as original array
If a is 2-d, then a 1-d array containing the diagonal is returned.
If a is n-d, n > 2, then an array of diagonals is returned.
:SeeAlso:
- diag : matlab workalike for 1-d and 2-d arrays.
- diagflat : creates diagonal arrays
- trace : sum along diagonals
Examples
--------
>>> a = arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
>>> a = arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0,-2,-1)
array([[0, 3],
[4, 7]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file) Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load
Arguments:
file -- string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""a.dumps() returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""a.fill(value) -> None. Fill the array with the scalar value.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""a.flatten([fortran]) return a 1-d array (always copy)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""a.getfield(dtype, offset) -> field of array as given type.
Returns a field of the given array as a certain type. A field is a view of
the array data with each itemsize determined by the given type and the
offset into the current array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""a.item() -> copy of first array item as Python scalar.
Copy the first element of array to a standard Python scalar and return
it. The array must be of size one.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""a.max(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""a.mean(axis=None, dtype=None, out=None) -> mean
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the means are computed. The default is
to compute the standard deviation of the flattened array.
dtype : type
Type to use in computing the means. For arrays of
integer type the default is float32, for arrays of float types it
is the same as the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
mean : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- var : variance
- std : standard deviation
Notes
-----
The mean is the sum of the elements along the axis divided by the
number of elements.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""a.min(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""a.newbyteorder(<byteorder>) is equivalent to
    a.view(a.dtype.newbyteorder(<byteorder>))
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""a.nonzero() returns a tuple of arrays
Returns a tuple of arrays, one for each dimension of a,
containing the indices of the non-zero elements in that
dimension. The corresponding non-zero values can be obtained
with
a[a.nonzero()].
To group the indices by element, rather than dimension, use
transpose(a.nonzero())
instead. The result of this is always a 2d array, with a row for
    each non-zero element.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""a.prod(axis=None, dtype=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""a.ptp(axis=None) a.max(axis)-a.min(axis)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""a.put(indices, values, mode) sets a.flat[n] = values[n] for
each n in indices. If values is shorter than indices then it
will repeat.
"""))
add_newdoc('numpy.core.multiarray', 'putmask',
"""putmask(a, mask, values) sets a.flat[n] = values[n] for each n where
    mask.flat[n] is true. If values is not the same size as a and mask then
it will repeat. This gives different behavior than a[mask] = values.
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""a.ravel([fortran]) return a 1-d array (copy only if needed)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""a.repeat(repeats=, axis=none)
copy elements of a, repeats times. the repeats argument must be a sequence
of length a.shape[axis] or a scalar.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""a.reshape(d1, d2, ..., dn, order='c')
Return a new array from this one. The new array must have the same number
of elements as self. Also always returns a view or raises a ValueError if
that is impossible.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""a.resize(new_shape, refcheck=True, order=False) -> None. Change array shape.
Change size and shape of self inplace. Array must own its own memory and
not be referenced by other arrays. Returns None.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""a.round(decimals=0, out=None) -> out (a). Rounds to 'decimals' places.
Keyword arguments:
decimals -- number of decimals to round to (default 0). May be negative.
out -- existing array to use for output (default a).
Return:
Reference to out, where None specifies the original array a.
Round to the specified number of decimals. When 'decimals' is negative it
specifies the number of positions to the left of the decimal point. The
real and imaginary parts of complex numbers are rounded separately. Nothing
is done if the array is not of float type and 'decimals' is >= 0.
The keyword 'out' may be used to specify a different array to hold the
result rather than the default 'a'. If the type of the array specified by
'out' differs from that of 'a', the result is cast to the new type,
otherwise the original type is kept. Floats round to floats by default.
Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to
0.0, etc. Results may also be surprising due to the inexact representation
of decimal fractions in IEEE floating point and the errors introduced in
scaling the numbers when 'decimals' is something other than 0.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""a.searchsorted(v, side='left') -> index array.
Find the indices into a sorted array such that if the corresponding keys in
v were inserted before the indices the order of a would be preserved. If
side='left', then the first such index is returned. If side='right', then
the last such index is returned. If there is no such index because the key
is out of bounds, then the length of a is returned, i.e., the key would
need to be appended. The returned index array has the same shape as v.
:Parameters:
v : array or list type
Array of keys to be searched for in a.
side : string
Possible values are : 'left', 'right'. Default is 'left'. Return
the first or last index where the key could be inserted.
:Returns:
indices : integer array
The returned array has the same shape as v.
:SeeAlso:
- sort
- histogram
:Notes:
-------
The array a must be 1-d and is assumed to be sorted in ascending order.
Searchsorted uses binary search to find the required insertion points.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""m.setfield(value, dtype, offset) -> None.
places val into field of the given array defined by the data type and offset.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""a.setflags(write=None, align=None, uic=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""a.sort(axis=-1, kind='quicksort', order=None) -> None.
Perform an inplace sort along the given axis using the algorithm specified
by the kind keyword.
:Parameters:
axis : integer
Axis to be sorted along. None indicates that the flattened array
should be used. Default is -1.
kind : string
Sorting algorithm to use. Possible values are 'quicksort',
'mergesort', or 'heapsort'. Default is 'quicksort'.
order : list type or None
When a is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
:Returns:
None
:SeeAlso:
- argsort : indirect sort
- lexsort : indirect stable sort on multiple keys
- searchsorted : find keys in sorted array
:Notes:
------
The various sorts are characterized by average speed, worst case
performance, need for work space, and whether they are stable. A stable
sort keeps items with the same key in the same relative order. The three
available algorithms have the following properties:
|------------------------------------------------------|
| kind | speed | worst case | work space | stable|
|------------------------------------------------------|
|'quicksort'| 1 | O(n^2) | 0 | no |
|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes |
|'heapsort' | 3 | O(n*log(n)) | 0 | no |
|------------------------------------------------------|
All the sort algorithms make temporary copies of the data when the sort is not
along the last axis. Consequently, sorts along the last axis are faster and use
less space than sorts along other axis.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""m.squeeze() eliminate all length-1 dimensions
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""a.std(axis=None, dtype=None, out=None) -> standard deviation.
Returns the standard deviation of the array elements, a measure of the
spread of a distribution. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : type
Type to use in computing the standard deviation. For arrays of
integer type the default is float32, for arrays of float types it
is the same as the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
standard deviation : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- var : variance
- mean : average
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e. var = sqrt(mean((x - x.mean())**2)). The
computed standard deviation is biased, i.e., the mean is computed by
dividing by the number of elements, N, rather than by N-1.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""a.sum(axis=None, dtype=None) -> Sum of array over given axis.
Sum the array over the given axis. If the axis is None, sum over
all dimensions of the array.
The optional dtype argument is the data type for the returned
value and intermediate calculations. The default is to upcast
(promote) smaller integer types to the platform-dependent int.
For example, on 32-bit platforms:
a.dtype default sum dtype
---------------------------------------------------
bool, int8, int16, int32 int32
Warning: The arithmetic is modular and no error is raised on overflow.
Examples:
>>> array([0.5, 1.5]).sum()
2.0
>>> array([0.5, 1.5]).sum(dtype=int32)
1
>>> array([[0, 1], [0, 5]]).sum(axis=0)
array([0, 6])
>>> array([[0, 1], [0, 5]]).sum(axis=1)
array([1, 5])
>>> ones(128, dtype=int8).sum(dtype=int8) # overflow!
-128
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""a.swapaxes(axis1, axis2) -> new view with axes swapped.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""a.take(indices, axis=None, out=None, mode='raise') -> new array.
The new array is formed from the elements of a indexed by indices along the
given axis.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""a.tofile(fid, sep="", format="%s") -> None. Write the data to a file.
Required arguments:
file -- an open file object or a string containing a filename
Keyword arguments:
sep -- separator for text output. Write binary if empty (default "")
format -- format string for text file output (default "%s")
A convenience function for quick storage of array data. Information on
endianess and precision is lost, so this method is not a good choice for
files intended to archive data or transport data between machines with
different endianess. Some of these problems can be overcome by outputting
the data as text files at the expense of speed and file size.
If 'sep' is empty this method is equivalent to file.write(a.tostring()). If
'sep' is not empty each data item is converted to the nearest Python type
and formatted using "format"%item. The resulting strings are written to the
file separated by the contents of 'sep'. The data is always written in "C"
(row major) order independent of the order of 'a'.
The data produced by this method can be recovered by using the function
fromfile().
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""a.tolist() -> Array as hierarchical list.
Copy the data portion of the array to a hierarchical python list and return
that list. Data items are converted to the nearest compatible Python type.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring',
"""a.tostring(order='C') -> raw copy of array data as a Python string.
Keyword arguments:
order -- order of the data item in the copy {"C","F","A"} (default "C")
Construct a Python string containing the raw bytes in the array. The order
of the data in arrays with ndim > 1 is specified by the 'order' keyword and
this keyword overrides the order of the array. The
choices are:
"C" -- C order (row major)
"Fortran" -- Fortran order (column major)
"Any" -- Current order of array.
None -- Same as "Any"
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
return the sum along the offset diagonal of the array's indicated
axis1 and axis2.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""a.transpose(*axes)
Returns a view of 'a' with axes transposed. If no axes are given,
or None is passed, switches the order of the axes. For a 2-d
array, this is the usual matrix transpose. If axes are given,
they describe how the axes are permuted.
Example:
>>> a = array([[1,2],[3,4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1,0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1,0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""a.var(axis=None, dtype=None, out=None) -> variance
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the variance is computed. The default is to
compute the variance of the flattened array.
dtype : type
Type to use in computing the variance. For arrays of integer type
the default is float32, for arrays of float types it is the same as
the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
variance : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- std : standard deviation
- mean: average
Notes
-----
The variance is the average of the squared deviations from the mean, i.e.
var = mean((x - x.mean())**2). The computed variance is biased, i.e.,
the mean is computed by dividing by the number of elements, N, rather
than by N-1.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""a.view(<type>) -> new view of array with same data.
Type can be either a new sub-type object or a data-descriptor object
"""))
| bsd-3-clause | -6,906,141,212,455,126,000 | 29.797071 | 83 | 0.613206 | false |
italomaia/turtle-linux | games/BubbleKing/lib/menu.py | 1 | 13774 | import os
import pygame
from pygame.locals import *
from pgu import engine
import data
from cnst import *
import levels
class Menu(engine.State):
def __init__(self,game):
self.game = game
def init(self):
self.font = self.game.font
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.cur = 0
self.game.lcur = 0
self.levels = []
#for fname in os.listdir(data.filepath('levels')):
#if fname[0]=='.': continue
#self.levels.append((fname,fname.replace('.tga','')))
#self.levels.sort()
for fname,title in levels.LEVELS:
self.levels.append((fname,title))
self.items = [
('play the game!','start'),
('select <L>','play'),
('help','help'),
('credits','credits'),
('quit','quit'),
]
self.rects = []
self.frame = 0
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
x,y = 0,4
fnt = self.game.fonts['title']
c =(0,0,0)
text = TITLE
img = fnt.render(text,1,c)
screen.blit(img,((SW-img.get_width())/2,y))
y += 48
fnt = self.font
text = 'high: %05d'%self.game.high
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
x = 90
for n in xrange(0,len(self.items)):
text,value = self.items[n]
text = text.replace('L',self.levels[self.game.lcur][1])
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
if n == self.cur: c = (0xaa,0xaa,0xaa)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 24
text = 'www.imitationpickles.org'
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
y = SH-(img.get_height()+4)
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.game.music_play('title')
self.frame += 1
def event(self,e):
if e.type is USEREVENT and e.action == 'down':
self.cur = (self.cur+1)%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'up':
self.cur = (self.cur-1+len(self.items))%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'left':
self.game.lcur = (self.game.lcur-1+len(self.levels))%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'right':
self.game.lcur = (self.game.lcur+1)%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'exit':
return engine.Quit(self.game)
elif e.type is USEREVENT and (e.action == 'menu' or e.action == 'jump'):
text,value = self.items[self.cur]
if value == 'start':
self.game.init_play()
self.game.lcur = 0
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'play':
self.game.init_play()
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'quit':
return engine.Quit(self.game)
elif value == 'credits':
return Transition(self.game,Credits(self.game,self))
elif value == 'help':
return Transition(self.game,Help(self.game,self))
class Transition(engine.State):
def __init__(self,game,next):
self.game,self.next = game,next
def init(self):
self.s1 = self.game.screen.convert()
self.init2()
self.frame = 0
self.total = FPS
self.inc = 0
def init2(self):
if hasattr(self.next,'init') and not hasattr(self.next,'_init'):
self.next._init = 0
self.next.init()
self.s2 = self.game.screen.convert()
self.next.paint(self.s2)
def loop(self):
#self.frame += 1
self.inc += 1
#if (self.inc%2) == 0: self.frame += 1
self.frame += 1
if self.frame == self.total:
self.game.screen.blit(self.s2,(0,0))
self.game.flip()
return self.next
def update(self,screen):
return self.paint(screen)
def paint(self,screen):
f = self.frame
t = self.total
t2 = t/2
if f < t2:
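            # First half of the transition: shrink the old frame to a thin vertical slice.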
i = self.s1
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
else:
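            # Second half: mirror the frame count and grow the new frame back to full size.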
f = t2-(f-t2)
i = self.s2
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
i = pygame.transform.scale(i,(SW,SH))
screen.blit(i,(0,0))
self.game.flip()
class Intro(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = FPS
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
self.black = self.moon.convert()
self.black.fill((0,0,0))
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
if self.frame == FPS*7:
return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
screen.fill((0,0,0))
f = self.frame
inc = FPS
if 0 < f < inc:
pass
f -= inc
inc = FPS*7
if 0 < f < inc:
a = 255
if f > FPS*2:
screen.blit(self.moon,(0,0))
a = 255- ((f-FPS*2)*255/(FPS*2))
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['... July 20, 1969','man first','walked on','the moon.']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
if f < FPS:
a = 255-(f*255/FPS)
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
self.game.flip()
class Intro2(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
img = pygame.image.load(data.filepath(os.path.join('images','player','right.png')))
w = 160
self.player = pygame.transform.scale(img,(w,img.get_height()*w/img.get_width()))
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.frame = 0
def loop(self):
self.frame += 1
if self.frame == FPS*2:
return Transition(self.game,self.next)
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
#screen.fill((0,0,0))
screen.blit(self.bkgr,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['This is','the year','of the','seahorse!']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
screen.blit(self.player,(130,0))
self.game.flip()
class Prompt(engine.State):
def __init__(self,game,text,yes,no):
self.game = game
self.text = text
self.yes = yes
self.no = no
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN and e.key == K_y:
return self.yes
if e.type is KEYDOWN and e.key == K_n:
return self.no
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Pause(engine.State):
def __init__(self,game,text,next):
self.game = game
self.text = text
self.next = next
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return self.next
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Credits(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Core Team',
'',
'philhassey - director, code, levels',
'trick - tiles, sprites',
'pekuja - code, levels',
'tim - music, levels',
'DrPetter - backgrounds, sfx',
'',
'Also thanks to:',
'fydo (level), Lerc (gfx), Tee (level)',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
class Help(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Help',
'',
'Use your arrow keys to',
'move the seahorse.',
'Button 1 - Jump',
'Button 2 - Shoot',
'',
'Enemies take 3 shots unless',
'you are powered up! You can',
'ride enemy bubbles.',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
| gpl-3.0 | 2,693,359,161,638,019,000 | 29.074236 | 102 | 0.483665 | false |
mathiasertl/fabric | fabric/context_managers.py | 1 | 20926 | """
Context managers for use with the ``with`` statement.
.. note:: If you are using multiple directly nested ``with`` statements, it can
be convenient to use multiple context expressions in one single with
statement. Instead of writing::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
you can write::
with cd('/path/to/app'), prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
"""
from contextlib import contextmanager
import six
import socket
import select
from fabric.thread_handling import ThreadHandler
from fabric.state import output, win32, connections, env
from fabric import state
from fabric.utils import isatty
if six.PY2:
from contextlib import nested
else:
from contextlib import ExitStack
class nested(ExitStack):
def __init__(self, *managers):
super(nested, self).__init__()
for manager in managers:
self.enter_context(manager)
if not win32:
import termios
import tty
def _set_output(groups, which):
"""
Refactored subroutine used by ``hide`` and ``show``.
"""
previous = {}
try:
# Preserve original values, pull in new given value to use
for group in output.expand_aliases(groups):
previous[group] = output[group]
output[group] = which
# Yield control
yield
finally:
# Restore original values
output.update(previous)
def documented_contextmanager(func):
wrapper = contextmanager(func)
wrapper.undecorated = func
return wrapper
@documented_contextmanager
def show(*groups):
"""
Context manager for setting the given output ``groups`` to True.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to True for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to turn on debug output (which is typically off by default)::
def my_task():
with show('debug'):
run('ls /var/www')
As almost all output groups are displayed by default, `show` is most useful
for turning on the normally-hidden ``debug`` group, or when you know or
suspect that code calling your own code is trying to hide output with
`hide`.
"""
return _set_output(groups, True)
@documented_contextmanager
def hide(*groups):
"""
Context manager for setting the given output ``groups`` to False.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to False for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to hide the "[hostname] run:" status lines, as well as
preventing printout of stdout and stderr, one might use `hide` as follows::
def my_task():
with hide('running', 'stdout', 'stderr'):
run('ls /var/www')
"""
return _set_output(groups, False)
@documented_contextmanager
def _setenv(variables):
"""
Context manager temporarily overriding ``env`` with given key/value pairs.
A callable that returns a dict can also be passed. This is necessary when
new values are being calculated from current values, in order to ensure that
the "current" value is current at the time that the context is entered, not
when the context manager is initialized. (See Issue #736.)
This context manager is used internally by `settings` and is not intended
to be used directly.
"""
if callable(variables):
variables = variables()
clean_revert = variables.pop('clean_revert', False)
previous = {}
new = []
for key, value in six.iteritems(variables):
if key in state.env:
previous[key] = state.env[key]
else:
new.append(key)
state.env[key] = value
try:
yield
finally:
if clean_revert:
for key, value in six.iteritems(variables):
# If the current env value for this key still matches the
# value we set it to beforehand, we are OK to revert it to the
# pre-block value.
if key in state.env and value == state.env[key]:
if key in previous:
state.env[key] = previous[key]
else:
del state.env[key]
else:
state.env.update(previous)
for key in new:
del state.env[key]
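# Illustrative sketch (added commentary, not part of fabric's public API):
# passing a callable defers evaluation of the overrides until the block is
# entered, so values derived from the *current* env stay current (Issue #736).
# The helper below is a demo only and is never called by fabric itself.
def _example_setenv_callable():
    with _setenv(lambda: {'command_prefixes': state.env.command_prefixes + ['true']}):
        assert state.env.command_prefixes[-1] == 'true'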
def settings(*args, **kwargs):
"""
Nest context managers and/or override ``env`` variables.
`settings` serves two purposes:
* Most usefully, it allows temporary overriding/updating of ``env`` with
any provided keyword arguments, e.g. ``with settings(user='foo'):``.
Original values, if any, will be restored once the ``with`` block closes.
* The keyword argument ``clean_revert`` has special meaning for
``settings`` itself (see below) and will be stripped out before
execution.
* In addition, it will use `contextlib.nested`_ to nest any given
non-keyword arguments, which should be other context managers, e.g.
``with settings(hide('stderr'), show('stdout')):``.
.. _contextlib.nested: http://docs.python.org/library/contextlib.html#contextlib.nested
These behaviors may be specified at the same time if desired. An example
will hopefully illustrate why this is considered useful::
def my_task():
with settings(
hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True
):
if run('ls /etc/lsb-release'):
return 'Ubuntu'
elif run('ls /etc/redhat-release'):
return 'RedHat'
The above task executes a `run` statement, but will warn instead of
aborting if the ``ls`` fails, and all output -- including the warning
itself -- is prevented from printing to the user. The end result, in this
scenario, is a completely silent task that allows the caller to figure out
what type of system the remote host is, without incurring the handful of
output that would normally occur.
Thus, `settings` may be used to set any combination of environment
variables in tandem with hiding (or showing) specific levels of output, or
in tandem with any other piece of Fabric functionality implemented as a
context manager.
If ``clean_revert`` is set to ``True``, ``settings`` will **not** revert
keys which are altered within the nested block, instead only reverting keys
whose values remain the same as those given. More examples will make this
clear; below is how ``settings`` operates normally::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost'):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string is None again
The internal modification of ``env.host_string`` is nullified -- not always
desirable. That's where ``clean_revert`` comes in::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost', clean_revert=True):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string remains 'otherhost'
Brand new keys which did not exist in ``env`` prior to using ``settings``
are also preserved if ``clean_revert`` is active. When ``False``, such keys
are removed when the block exits.
.. versionadded:: 1.4.1
The ``clean_revert`` kwarg.
"""
managers = list(args)
if kwargs:
managers.append(_setenv(kwargs))
return nested(*managers)
def cd(path):
"""
Context manager that keeps directory state when calling remote operations.
Any calls to `run`, `sudo`, `get`, or `put` within the wrapped block will
implicitly have a string similar to ``"cd <path> && "`` prefixed in order
to give the sense that there is actually statefulness involved.
.. note::
`cd` only affects *remote* paths -- to modify *local* paths, use
`~fabric.context_managers.lcd`.
Because use of `cd` affects all such invocations, any code making use of
those operations, such as much of the ``contrib`` section, will also be
affected by use of `cd`.
Like the actual 'cd' shell builtin, `cd` may be called with relative paths
(keep in mind that your default starting directory is your remote user's
``$HOME``) and may be nested as well.
Below is a "normal" attempt at using the shell 'cd', which doesn't work due
to how shell-less SSH connections are implemented -- state is **not** kept
between invocations of `run` or `sudo`::
run('cd /var/www')
run('ls')
The above snippet will list the contents of the remote user's ``$HOME``
instead of ``/var/www``. With `cd`, however, it will work as expected::
with cd('/var/www'):
run('ls') # Turns into "cd /var/www && ls"
Finally, a demonstration (see inline comments) of nesting::
with cd('/var/www'):
run('ls') # cd /var/www && ls
with cd('website1'):
run('ls') # cd /var/www/website1 && ls
.. note::
This context manager is currently implemented by appending to (and, as
always, restoring afterwards) the current value of an environment
variable, ``env.cwd``. However, this implementation may change in the
future, so we do not recommend manually altering ``env.cwd`` -- only
the *behavior* of `cd` will have any guarantee of backwards
compatibility.
.. note::
Space characters will be escaped automatically to make dealing with
such directory names easier.
.. versionchanged:: 1.0
Applies to `get` and `put` in addition to the command-running
operations.
.. seealso:: `~fabric.context_managers.lcd`
"""
return _change_cwd('cwd', path)
def lcd(path):
"""
Context manager for updating local current working directory.
This context manager is identical to `~fabric.context_managers.cd`, except
that it changes a different env var (`lcwd`, instead of `cwd`) and thus
only affects the invocation of `~fabric.operations.local` and the local
arguments to `~fabric.operations.get`/`~fabric.operations.put`.
Relative path arguments are relative to the local user's current working
directory, which will vary depending on where Fabric (or Fabric-using code)
was invoked. You can check what this is with `os.getcwd
<http://docs.python.org/release/2.6/library/os.html#os.getcwd>`_. It may be
useful to pin things relative to the location of the fabfile in use, which
may be found in :ref:`env.real_fabfile <real-fabfile>`
.. versionadded:: 1.0
"""
return _change_cwd('lcwd', path)
def _change_cwd(which, path):
path = path.replace(' ', r'\ ')
if state.env.get(which) and not path.startswith('/') and not path.startswith('~'):
new_cwd = state.env.get(which) + '/' + path
else:
new_cwd = path
return _setenv({which: new_cwd})
def path(path, behavior='append'):
"""
Append the given ``path`` to the PATH used to execute any wrapped commands.
Any calls to `run` or `sudo` within the wrapped block will implicitly have
a string similar to ``"PATH=$PATH:<path> "`` prepended before the given
command.
You may customize the behavior of `path` by specifying the optional
``behavior`` keyword argument, as follows:
* ``'append'``: append given path to the current ``$PATH``, e.g.
``PATH=$PATH:<path>``. This is the default behavior.
* ``'prepend'``: prepend given path to the current ``$PATH``, e.g.
``PATH=<path>:$PATH``.
* ``'replace'``: ignore previous value of ``$PATH`` altogether, e.g.
``PATH=<path>``.
.. note::
This context manager is currently implemented by modifying (and, as
always, restoring afterwards) the current value of environment
variables, ``env.path`` and ``env.path_behavior``. However, this
implementation may change in the future, so we do not recommend
manually altering them directly.
.. versionadded:: 1.0
"""
return _setenv({'path': path, 'path_behavior': behavior})
def prefix(command):
"""
Prefix all wrapped `run`/`sudo` commands with given command plus ``&&``.
This is nearly identical to `~fabric.operations.cd`, except that nested
invocations append to a list of command strings instead of modifying a
single string.
Most of the time, you'll want to be using this alongside a shell script
which alters shell state, such as ones which export or alter shell
environment variables.
For example, one of the most common uses of this tool is with the
``workon`` command from `virtualenvwrapper
<http://www.doughellmann.com/projects/virtualenvwrapper/>`_::
with prefix('workon myvenv'):
run('./manage.py syncdb')
In the above snippet, the actual shell command run would be this::
$ workon myvenv && ./manage.py syncdb
This context manager is compatible with `~fabric.context_managers.cd`, so
if your virtualenv doesn't ``cd`` in its ``postactivate`` script, you could
do the following::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
Which would result in executions like so::
$ cd /path/to/app && workon myvenv && ./manage.py syncdb
$ cd /path/to/app && workon myvenv && ./manage.py loaddata myfixture
Finally, as alluded to near the beginning,
`~fabric.context_managers.prefix` may be nested if desired, e.g.::
with prefix('workon myenv'):
run('ls')
with prefix('source /some/script'):
run('touch a_file')
The result::
$ workon myenv && ls
$ workon myenv && source /some/script && touch a_file
Contrived, but hopefully illustrative.
"""
return _setenv(lambda: {'command_prefixes': state.env.command_prefixes + [command]})
@documented_contextmanager
def char_buffered(pipe):
"""
Force local terminal ``pipe`` be character, not line, buffered.
Only applies on Unix-based systems; on Windows this is a no-op.
"""
if win32 or not isatty(pipe):
yield
else:
old_settings = termios.tcgetattr(pipe)
tty.setcbreak(pipe)
try:
yield
finally:
termios.tcsetattr(pipe, termios.TCSADRAIN, old_settings)
def shell_env(**kw):
"""
Set shell environment variables for wrapped commands.
For example, the below shows how you might set a ZeroMQ related environment
variable when installing a Python ZMQ library::
with shell_env(ZMQ_DIR='/home/user/local'):
run('pip install pyzmq')
As with `~fabric.context_managers.prefix`, this effectively turns the
``run`` command into::
$ export ZMQ_DIR='/home/user/local' && pip install pyzmq
Multiple key-value pairs may be given simultaneously.
.. note::
If used to affect the behavior of `~fabric.operations.local` when
running from a Windows localhost, ``SET`` commands will be used to
implement this feature.
"""
return _setenv({'shell_env': kw})
def _forwarder(chan, sock):
# Bidirectionally forward data between a socket and a Paramiko channel.
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
@documented_contextmanager
def remote_tunnel(remote_port, local_port=None, local_host="localhost",
remote_bind_address="127.0.0.1"):
"""
Create a tunnel forwarding a locally-visible port to the remote target.
For example, you can let the remote host access a database that is
installed on the client host::
# Map localhost:6379 on the server to localhost:6379 on the client,
# so that the remote 'redis-cli' program ends up speaking to the local
# redis-server.
with remote_tunnel(6379):
run("redis-cli -i")
The database might be installed on a client only reachable from the client
host (as opposed to *on* the client itself)::
# Map localhost:6379 on the server to redis.internal:6379 on the client
with remote_tunnel(6379, local_host="redis.internal")
run("redis-cli -i")
``remote_tunnel`` accepts up to four arguments:
* ``remote_port`` (mandatory) is the remote port to listen to.
* ``local_port`` (optional) is the local port to connect to; the default is
the same port as the remote one.
* ``local_host`` (optional) is the locally-reachable computer (DNS name or
IP address) to connect to; the default is ``localhost`` (that is, the
same computer Fabric is running on).
* ``remote_bind_address`` (optional) is the remote IP address to bind to
for listening, on the current target. It should be an IP address assigned
to an interface on the target (or a DNS name that resolves to such IP).
You can use "0.0.0.0" to bind to all interfaces.
.. note::
By default, most SSH servers only allow remote tunnels to listen to the
localhost interface (127.0.0.1). In these cases, `remote_bind_address`
is ignored by the server, and the tunnel will listen only to 127.0.0.1.
.. versionadded: 1.6
"""
if local_port is None:
local_port = remote_port
sockets = []
channels = []
threads = []
def accept(channel, src, dest):
src_addr, src_port = src
dest_addr, dest_port = dest
channels.append(channel)
sock = socket.socket()
sockets.append(sock)
try:
sock.connect((local_host, local_port))
except Exception:
print("[%s] rtunnel: cannot connect to %s:%d (from local)" %
(env.host_string, local_host, local_port))
channel.close()
return
print("[%s] rtunnel: opened reverse tunnel: %r -> %r -> %r"
% (env.host_string, channel.origin_addr,
channel.getpeername(), (local_host, local_port)))
th = ThreadHandler('fwd', _forwarder, channel, sock)
threads.append(th)
transport = connections[env.host_string].get_transport()
transport.request_port_forward(remote_bind_address, remote_port, handler=accept)
try:
yield
finally:
for sock, chan, th in zip(sockets, channels, threads):
sock.close()
chan.close()
th.thread.join()
th.raise_if_needed()
transport.cancel_port_forward(remote_bind_address, remote_port)
quiet = lambda: settings(hide('everything'), warn_only=True)
quiet.__doc__ = """
Alias to ``settings(hide('everything'), warn_only=True)``.
Useful for wrapping remote interrogative commands which you expect to fail
occasionally, and/or which you want to silence.
Example::
with quiet():
have_build_dir = run("test -e /tmp/build").succeeded
When used in a task, the above snippet will not produce any ``run: test -e
/tmp/build`` line, nor will any stdout/stderr display, and command failure
is ignored.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.hide`
.. versionadded:: 1.5
"""
warn_only = lambda: settings(warn_only=True)
warn_only.__doc__ = """
Alias to ``settings(warn_only=True)``.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.quiet`
"""
| bsd-2-clause | -7,542,093,378,251,034,000 | 33.760797 | 91 | 0.632132 | false |
LokiW/extendable-cards | extendable_cards/view/game_view.py | 1 | 6424 | from extendable_cards.view.graphics import Rectangle, Point, Text
from tkinter import Button
class GameOutline(object):
def __init__(self, window, dx, dy, w, h):
self.top_y = dy
self.bottom_y = dy+h
self.right_x = dx+w
self.left_x = dx
self.discard_end_x = dx + (w/6.0)
self.discard_top_y = self.bottom_y - (h/3.0)
discard_p_b = Point(dx+1, self.bottom_y-1)
discard_p_t = Point(self.discard_end_x, self.discard_top_y)
discard_text_p = Point((2*dx + (w/6.0))/2, (self.bottom_y - (h / 6.0)))
self.discard = Rectangle(discard_p_b, discard_p_t)
self.discard.setFill("grey")
self.discard_text = Text(discard_text_p, "DISCARD PILE")
self.deck_begin_x = self.right_x - (w/6.0)
deck_p_b = Point(self.right_x-1, self.bottom_y-1)
deck_p_t = Point(self.deck_begin_x, self.bottom_y - (h / 3.0))
deck_text_p = Point(self.right_x - (w / 12.0), self.bottom_y - (h / 6.0))
self.deck = Rectangle(deck_p_b, deck_p_t)
self.deck.setFill("grey")
self.deck_text = Text(deck_text_p, "DECK")
self.hand = []
self.in_play = []
self.selected = []
self.win = window
def display_outline(self):
self.discard.draw(self.win)
self.deck.draw(self.win)
def display_outline_with_labels(self):
self.display_outline()
self.deck_text.draw(self.win)
self.discard_text.draw(self.win)
def undisplay_labels(self):
self.deck_text.undraw()
self.discard_text.undraw()
def display_hand_area(self):
self._display_card_list(self.hand, PlayArea.HAND)
def undisplay_hand_area(self):
for card in self.hand:
card.undisplay()
def display_play_area(self):
self._display_card_list(self.in_play, PlayArea.IN_PLAY)
def _display_card_list(self, cards, play_area):
card_num = len(cards)
if card_num == 0:
return False
cur_card = 0
lx, by, rx, ty = self.get_area_points(play_area)
y_unit = (by - ty) / 50.0
card_height = by - ty - 2*y_unit
card_width = card_height * (5.0/7.0)
x_unit = ((rx - card_width) - lx)/card_num
for card in cards:
card.display_card(lx + (cur_card*x_unit), ty + y_unit, w=card_width, h=card_height)
cur_card += 1
def undisplay_play_area(self):
for card in self.in_play:
card.undisplay()
    def select_cards(self, cards, play_area):
        # Route each card through select_card so selection bookkeeping stays in one place.
        for card in cards:
            self.select_card(card, play_area)
    def select_card(self, card, play_area):
        if play_area == PlayArea.HAND:
            for hc in self.hand[:]:
                if hc.card.name == card.card.name:
                    self.selected.append({'card': hc, 'origin': play_area})
                    self.hand.remove(hc)
                    return
        elif play_area == PlayArea.IN_PLAY:
            for ipc in self.in_play[:]:
                if ipc.card.name == card.card.name:
                    self.selected.append({'card': ipc, 'origin': play_area})
                    self.in_play.remove(ipc)
                    return
        elif play_area == PlayArea.DECK or play_area == PlayArea.DISCARD:
            # Assumes `card` is already a displayable card view here.
            self.selected.append({'card': card, 'origin': play_area})
        elif play_area == PlayArea.SELECTION:
            for entry in self.selected:
                if entry['card'].card.name == card.card.name:
                    origin = entry['origin']
                    self.return_selections()
                    self.selected.append({'card': entry['card'], 'origin': origin})
                    return
def return_selections(self):
self.undisplay_selection()
for card in self.selected[:]:
            if card['origin'] == PlayArea.HAND:
                self.hand.append(card['card'])
                self.selected.remove(card)
            elif card['origin'] == PlayArea.IN_PLAY:
                self.in_play.append(card['card'])
                self.selected.remove(card)
else:
self.selected.remove(card)
def display_selection(self):
self._display_card_list([item['card'] for item in self.selected], PlayArea.SELECTION)
def undisplay_selection(self):
for card in self.selected:
            card['card'].undisplay()
def add_to_hand_area(self, card_view):
self.hand.append(card_view)
def add_to_play_area(self, card_view):
self.in_play.append(card_view)
def get_card_at_point(self, point, area):
x = point.getX()
y = point.getY()
if area == PlayArea.HAND:
last_seen = None
for card in self.hand:
lx = min(card.card.getP1().getX(), card.card.getP2().getX())
if lx < x:
last_seen = card
else:
return last_seen
return last_seen
def get_area(self, point):
x = point.getX()
y = point.getY()
if y < self.discard_top_y:
return PlayArea.IN_PLAY
elif x < self.discard_end_x:
return PlayArea.DISCARD
elif x > self.deck_begin_x:
return PlayArea.DECK
elif len(self.selected) > 0:
return PlayArea.HAND
else:
return PlayArea.HAND
def get_area_points(self, area):
if area == PlayArea.IN_PLAY:
return (self.left_x, self.discard_top_y, self.right_x, self.top_y)
elif area == PlayArea.DISCARD:
return (self.left_x, self.bottom_y, self.discard_end_x, self.discard_top_y)
elif area == PlayArea.HAND:
return (self.discard_end_x, self.bottom_y, self.deck_begin_x, self.discard_top_y)
elif area == PlayArea.DECK:
return (self.deck_begin_x, self.bottom_y, self.right_x, self.discard_top_y)
elif area == PlayArea.SELECTION:
return (self.discard_end_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(2.0/3.0),
self.deck_begin_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(5.0/3.0))
class PlayArea(object):
IN_PLAY = "play"
DISCARD = "discard"
DECK = "deck"
HAND = "hand"
SELECTION = "selection"
| bsd-2-clause | 7,205,527,144,953,349,000 | 30.960199 | 103 | 0.538294 | false |
google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/market_data/rate_curve.py | 1 | 13760 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RateCurve object."""
from typing import Optional, Tuple
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance import math
from tf_quant_finance import rates as rates_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types
from tf_quant_finance.experimental.pricing_platform.framework.core import daycount_conventions
from tf_quant_finance.experimental.pricing_platform.framework.core import interpolation_method
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils
_DayCountConventions = daycount_conventions.DayCountConventions
_InterpolationMethod = interpolation_method.InterpolationMethod
_DayCountConventionsProtoType = types.DayCountConventionsProtoType
class RateCurve(pmd.RateCurve):
"""Represents an interest rate curve."""
def __init__(
self,
maturity_dates: types.DateTensor,
discount_factors: tf.Tensor,
valuation_date: types.DateTensor,
interpolator: Optional[_InterpolationMethod] = None,
interpolate_rates: Optional[bool] = True,
daycount_convention: Optional[_DayCountConventionsProtoType] = None,
curve_type: Optional[curve_types.CurveType] = None,
dtype: Optional[tf.DType] = None,
name: Optional[str] = None):
"""Initializes the interest rate curve.
Args:
maturity_dates: A `DateTensor` containing the maturity dates on which the
curve is specified.
discount_factors: A `Tensor` of real dtype specifying the discount factors
corresponding to the input maturities. The shape of this input should
match the shape of `maturity_dates`.
valuation_date: A scalar `DateTensor` specifying the valuation (or
settlement) date for the curve.
interpolator: An instance of `InterpolationMethod`.
Default value: `None` in which case cubic interpolation is used.
interpolate_rates: A boolean specifying whether the interpolation should
be done in discount rates or discount factors space.
Default value: `True`, i.e., interpolation is done in the discount
factors space.
daycount_convention: `DayCountConventions` to use for the interpolation
purpose.
Default value: `None` which maps to actual/365 day count convention.
curve_type: An instance of `CurveTypes` to mark the rate curve.
Default value: `None` which means that the curve does not have the
marker.
dtype: `tf.Dtype`. Optional input specifying the dtype of the `rates`
input.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'rate_curve'.
"""
self._name = name or "rate_curve"
with tf.compat.v1.name_scope(self._name):
self._discount_factor_nodes = tf.convert_to_tensor(
discount_factors, dtype=dtype,
name="curve_discount_factors")
self._dtype = dtype or self._discount_factor_nodes.dtype
if interpolator is None or interpolator == _InterpolationMethod.CUBIC:
def cubic_interpolator(xi, x, y):
spline_coeffs = math.interpolation.cubic.build_spline(x, y)
return math.interpolation.cubic.interpolate(xi, spline_coeffs,
dtype=dtype)
interpolator = cubic_interpolator
self._interpolation_method = _InterpolationMethod.CUBIC
elif interpolator == _InterpolationMethod.LINEAR:
def linear_interpolator(xi, x, y):
return math.interpolation.linear.interpolate(xi, x, y,
dtype=dtype)
interpolator = linear_interpolator
self._interpolation_method = _InterpolationMethod.LINEAR
elif interpolator == _InterpolationMethod.CONSTANT_FORWARD:
def constant_fwd(xi, x, y):
return rates_lib.constant_fwd.interpolate(xi, x, y, dtype=dtype)
interpolator = constant_fwd
self._interpolation_method = _InterpolationMethod.CONSTANT_FORWARD
else:
raise ValueError(f"Unknown interpolation method {interpolator}.")
self._dates = dateslib.convert_to_date_tensor(maturity_dates)
self._valuation_date = dateslib.convert_to_date_tensor(
valuation_date)
self._daycount_convention = (
daycount_convention or _DayCountConventions.ACTUAL_365)
self._day_count_fn = utils.get_daycount_fn(self._daycount_convention)
self._times = self._get_time(self._dates)
self._interpolator = interpolator
self._interpolate_rates = interpolate_rates
self._curve_type = curve_type
@property
def daycount_convention(self) -> types.DayCountConventionsProtoType:
"""Daycount convention."""
return self._daycount_convention
def daycount_fn(self):
"""Daycount function."""
return self._day_count_fn
@property
def discount_factor_nodes(self) -> types.FloatTensor:
"""Discount factors at the interpolation nodes."""
return self._discount_factor_nodes
@property
def node_dates(self) -> types.DateTensor:
"""Dates at which the discount factors and rates are specified."""
return self._dates
@property
def discount_rate_nodes(self) -> types.FloatTensor:
"""Discount rates at the interpolation nodes."""
discount_rates = tf.math.divide_no_nan(
-tf.math.log(self.discount_factor_nodes), self._times,
name="discount_rate_nodes")
return discount_rates
def set_discount_factor_nodes(self, values: types.FloatTensor):
"""Update discount factors at the interpolation nodes with new values."""
values = tf.convert_to_tensor(values, dtype=self._dtype)
values_shape = values.shape.as_list()
nodes_shape = self.discount_factor_nodes.shape.as_list()
if values_shape != nodes_shape:
raise ValueError("New values should have shape {0} but are of "
"shape {1}".format(nodes_shape, values_shape))
self._discount_factor_nodes = values
def discount_rate(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns interpolated rates at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
rates = -tf.math.divide_no_nan(
tf.math.log(discount_factor), times)
return tf.identity(rates, name=name or "discount_rate")
def discount_factor(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns discount factors at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
discount_factor = tf.math.exp(-rates * times)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
return tf.identity(discount_factor, name=name or "discount_factor")
def forward_rate(
self,
start_date: Optional[types.DateTensor] = None,
maturity_date: Optional[types.DateTensor] = None,
start_time: Optional[types.FloatTensor] = None,
maturity_time: Optional[types.FloatTensor] = None,
day_count_fraction: Optional[tf.Tensor] = None):
"""Returns the simply accrued forward rate between [start_dt, maturity_dt].
Args:
start_date: A `DateTensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
      maturity_date: A `DateTensor` specifying the end of the accrual period
        for the forward rate. The shape of `maturity_date` must be
        broadcastable with the shape of `start_date`. The function expects
        either `maturity_date` or `maturity_time` to be specified.
      start_time: A real `Tensor` specifying the start of the accrual period
        for the forward rate. The function expects either `start_date` or
        `start_time` to be specified.
      maturity_time: A real `Tensor` specifying the end of the accrual period
        for the forward rate. The shape of `maturity_time` must be
        broadcastable with the shape of `start_time`. The function expects
        either `maturity_date` or `maturity_time` to be specified.
day_count_fraction: An optional `Tensor` of real dtype specifying the
time between `start_date` and `maturity_date` in years computed using
the forward rate's day count basis. The shape of the input should be
the same as that of `start_date` and `maturity_date`.
Default value: `None`, in which case the daycount fraction is computed
using `daycount_convention`.
Returns:
A real `Tensor` of same shape as the inputs containing the simply
compounded forward rate.
"""
if start_date is None and start_time is None:
raise ValueError("Either start_date or start_times "
"must be supplied.")
if maturity_date is None and maturity_time is None:
raise ValueError("Either maturity_date or maturity_time must be "
"supplied.")
if start_date is not None and maturity_date is not None:
start_date = dateslib.convert_to_date_tensor(start_date)
maturity_date = dateslib.convert_to_date_tensor(maturity_date)
if day_count_fraction is None:
day_count_fn = self._day_count_fn
day_count_fraction = day_count_fn(
start_date=start_date, end_date=maturity_date, dtype=self._dtype)
else:
day_count_fraction = tf.convert_to_tensor(day_count_fraction,
self._dtype,
name="day_count_fraction")
start_time = self._get_time(start_date)
maturity_time = self._get_time(maturity_date)
else:
start_time = tf.convert_to_tensor(start_time, dtype=self._dtype)
maturity_time = tf.convert_to_tensor(maturity_time, dtype=self._dtype)
day_count_fraction = maturity_time - start_time
dfstart = self.discount_factor(interpolation_times=start_time)
dfmaturity = self.discount_factor(interpolation_times=maturity_time)
return tf.math.divide_no_nan(
tf.math.divide_no_nan(dfstart, dfmaturity) - 1., day_count_fraction)
@property
def valuation_date(self) -> types.DateTensor:
return self._valuation_date
@property
def interpolation_method(self) -> _InterpolationMethod:
return self._interpolation_method
def _get_time(self,
dates: types.DateTensor) -> types.FloatTensor:
"""Computes the year fraction from the curve's valuation date."""
return self._day_count_fn(start_date=self._valuation_date,
end_date=dates,
dtype=self._dtype)
@property
def curve_type(self) -> curve_types.CurveType:
return self._curve_type
def discount_factors_and_dates(self) -> Tuple[types.FloatTensor,
types.DateTensor]:
"""Returns discount factors and dates at which the discount curve is fitted.
"""
return (self._discount_factor_nodes, self._dates)
@property
def dtype(self) -> types.Dtype:
return self._dtype
@property
def interpolate_rates(self) -> bool:
"""Returns `True` if the interpolation is on rates and not on discounts."""
return self._interpolate_rates
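# Illustrative sketch (not part of the original module): `forward_rate`
# reduces to (df(t_start) / df(t_maturity) - 1) / tau for discount factors
# df and day-count fraction tau. A plain-float check of that identity:
def _example_forward_rate(df_start=0.99, df_maturity=0.97, tau=0.5):
  # With these assumed inputs the simple forward rate is ~0.0412 (4.12%).
  return (df_start / df_maturity - 1.0) / tau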
__all__ = ["RateCurve"]
| apache-2.0 | -6,346,764,848,289,983,000 | 44.114754 | 102 | 0.672456 | false |
Davasny/CCAS | ccas/models/exchanges/__init__.py | 1 | 1783 | from . import poloniex, btc_e, bittrex, bitfinex
from ccas.models import database, coinmarketcap
def get_balances(exchange, public_key, secret_key):
if exchange == "poloniex":
return poloniex.get_balances(public_key, secret_key)
if exchange == "btc-e":
return btc_e.get_balances(public_key, secret_key)
if exchange == "bittrex":
return bittrex.get_balances(public_key, secret_key)
if exchange == "bitfinex":
return bitfinex.get_balances(public_key, secret_key)
def get_exchanges():
response = database.new_query("SELECT id, exchange FROM exchanges_api_keys;")
return list(response)
def get_btc_price():
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='btc';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_btc_price()
if exchange == "btc-e":
return btc_e.get_btc_price()
if exchange == "bittrex":
return bittrex.get_btc_price()
if exchange == "bitfinex":
return bitfinex.get_btc_price()
else:
return -1
def get_price(currency):
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='"+ currency.lower() +"';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_price(currency)
if exchange == "btc-e":
return btc_e.get_price(currency)
if exchange == "bittrex":
return bittrex.get_price(currency)
if exchange == "bitfinex":
return bitfinex.get_price(currency)
if exchange == "coinmarketcap":
return coinmarketcap.get_price(currency)
else:
return -1
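# Design note (illustrative, not part of the original module): the repeated
# if-chains above could be collapsed into a dispatch table. All names below
# already exist in this module's imports.
_PRICE_BACKENDS = {
    "poloniex": poloniex.get_price,
    "btc-e": btc_e.get_price,
    "bittrex": bittrex.get_price,
    "bitfinex": bitfinex.get_price,
    "coinmarketcap": coinmarketcap.get_price,
}

def _get_price_via_table(currency, exchange):
    # Mirrors get_price(): unknown exchanges yield -1.
    backend = _PRICE_BACKENDS.get(exchange)
    return backend(currency) if backend else -1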
| mit | 4,158,025,492,351,887,000 | 33.960784 | 113 | 0.615816 | false |
veryhappythings/discord-gather | gather/discord_gather.py | 1 | 2123 | import asyncio
import logging
import discord
from .gatherbot import GatherBot
from .organiser import Organiser
from . import commands
logger = logging.getLogger(__name__)
class DiscordGather:
def __init__(self, token):
self.token = token
self.bot = None
self.client = discord.Client()
self.client.on_ready = self.on_ready
asyncio.get_event_loop().call_soon(self._report_loop)
def run(self):
self.client.run(self.token)
async def on_ready(self):
self.bot = GatherBot(self.client.user.name)
self.bot.register_message_handler(self.client.send_message)
self.bot.register_action('^!help$', commands.bot_help)
self.bot.register_action('^!(?:add|join|s)$', commands.add)
self.bot.register_action('^!(?:remove|rem|so)$', commands.remove)
self.bot.register_action('^!(?:game|status)$', commands.game_status)
self.bot.register_action('^!(?:reset)$', commands.reset)
self.client.on_member_update = self.on_member_update
self.client.on_message = self.bot.on_message
logger.info('Logged in as')
logger.info(self.bot.username)
logger.info('------')
async def on_member_update(self, before, after):
# Handle players going offline
if (before.status == discord.Status.online and
after.status == discord.Status.offline):
await self.bot.member_went_offline(before)
# Handle players going AFK
elif (before.status == discord.Status.online and
after.status == discord.Status.idle):
await self.bot.member_went_afk(before)
def _report_loop(self):
if self.bot:
logger.info(report(self.bot.organiser))
asyncio.get_event_loop().call_later(60 * 10, self._report_loop)
def report(organiser: Organiser) -> str:
report = ["Report:"]
for key, queue in organiser.queues.items():
report.append("{}-{}: {} current players - {} games to date".format(
key.server, key, len(queue), organiser.games_count[key]))
return "\n".join(report)
| mit | -7,811,874,497,295,730,000 | 33.803279 | 76 | 0.628356 | false |
GoogleCloudPlatform/cloudml-samples | chainer/containers/quickstart/mnist/trainer/mnist.py | 1 | 6554 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import six
import subprocess
import hypertune
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer import serializers
MODEL_FILE_NAME = 'chainer.model'
class Net(chainer.Chain):
def __init__(self):
super(Net, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(1, 10, ksize=5)
self.conv2 = L.Convolution2D(10, 20, ksize=5)
self.fc1 = L.Linear(None, 50)
self.fc2 = L.Linear(None, 10)
def forward(self, x):
x = F.relu(F.max_pooling_2d(self.conv1(x), 2))
x = F.relu(F.max_pooling_2d(F.dropout(self.conv2(x)), 2))
x = F.reshape(F.flatten(x), (-1, 320))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return x
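# Illustrative shape check (not part of the original sample). For one MNIST
# image of shape (1, 1, 28, 28): conv1 (k=5) -> 24x24, pool -> 12x12,
# conv2 (k=5) -> 8x8, pool -> 4x4, giving 20 * 4 * 4 = 320 features for fc1
# and output logits of shape (1, 10).
def _example_forward_shape():
    import numpy as np
    x = np.zeros((1, 1, 28, 28), dtype=np.float32)
    return Net().forward(x).shape  # (1, 10)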
class HpReport(chainer.training.Extension):
"""Trainer extension for hyper parameter tuning with CMLE.
Args:
log_report (str or LogReport): Log report to accumulate the
observations. This is either the name of a LogReport extensions
registered to the trainer, or a LogReport instance to use
internally.
        hp_global_step: Log key for the global step (e.g. 'epoch').
        hp_metric_val: Log key for the metric value to report.
        hp_metric_tag: User-defined hyperparameter metric tag.
"""
def __init__(self,
log_report='LogReport',
hp_global_step='epoch',
hp_metric_val='validation/main/loss',
hp_metric_tag='loss'):
self._log_report = log_report
self._log_len = 0 # number of observations already done
self._hp_global_step = hp_global_step
self._hp_metric_val = hp_metric_val
self._hp_metric_tag = hp_metric_tag
def __call__(self, trainer):
log_report = self._log_report
if isinstance(log_report, str):
log_report = trainer.get_extension(log_report)
        elif isinstance(log_report, extensions.LogReport):
            log_report(trainer)  # update the log report
else:
raise TypeError('log report has a wrong type %s' %
type(log_report))
log = log_report.log
log_len = self._log_len
hpt = hypertune.HyperTune()
while len(log) > log_len:
target_log = log[log_len]
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag=self._hp_metric_tag,
metric_value=target_log[self._hp_metric_val],
global_step=target_log[self._hp_global_step])
log_len += 1
        self._log_len = log_len
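# Illustrative wiring (mirrors main() below): HpReport must run after a
# LogReport so each new log row can be forwarded to the hypertune service:
#
#     trainer.extend(extensions.LogReport())
#     trainer.extend(HpReport(hp_metric_val='validation/main/loss',
#                             hp_metric_tag='my_loss'))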
def get_args():
"""Argument parser.
Returns:
Dictionary of arguments.
"""
parser = argparse.ArgumentParser(description='Chainer MNIST Example')
parser.add_argument(
'--batch-size',
type=int,
default=100,
metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument(
'--test-batch-size',
type=int,
default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument(
'--epochs',
type=int,
default=10,
metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument(
'--lr',
type=float,
default=0.01,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument(
'--momentum',
type=float,
default=0.5,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument(
'--model-dir',
default=None,
help='The directory to store the model')
parser.add_argument(
'--gpu',
type=int,
default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument(
'--resume',
action='store_true',
help='Resume training')
args = parser.parse_args()
return args
def main():
# Training settings
args = get_args()
# Set up a neural network to train
model = L.Classifier(Net())
if args.gpu >= 0:
# Make a specified GPU current
chainer.backends.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=args.momentum)
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(ndim=3)
train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
test_iter = chainer.iterators.SerialIterator(test, args.test_batch_size,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epochs, 'epoch'))
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Send selected entries of the log to CMLE HP tuning system
trainer.extend(
HpReport(hp_metric_val='validation/main/loss', hp_metric_tag='my_loss'))
if args.resume:
# Resume from a snapshot
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
if not os.path.exists(tmp_model_file):
subprocess.check_call([
'gsutil', 'cp', os.path.join(args.model_dir, MODEL_FILE_NAME),
tmp_model_file])
if os.path.exists(tmp_model_file):
chainer.serializers.load_npz(tmp_model_file, trainer)
trainer.run()
if args.model_dir:
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
serializers.save_npz(tmp_model_file, model)
subprocess.check_call([
'gsutil', 'cp', tmp_model_file,
os.path.join(args.model_dir, MODEL_FILE_NAME)])
if __name__ == '__main__':
main()
| apache-2.0 | 508,161,105,582,383,800 | 30.209524 | 80 | 0.652121 | false |
yasserglez/pytiger2c | packages/pytiger2c/ast/integerliteralexpressionnode.py | 1 | 2204 | # -*- coding: utf-8 -*-
"""
Clase C{IntegerLiteralExpressionNode} del árbol de sintáxis abstracta.
"""
from pytiger2c.ast.valuedexpressionnode import ValuedExpressionNode
from pytiger2c.types.integertype import IntegerType
class IntegerLiteralExpressionNode(ValuedExpressionNode):
"""
Clase C{IntegerLiteralExpressionNode} del árbol de sintáxis abstracta.
Representa un literal de un número entero en el lenguaje Tiger. El valor
de retorno de esta expresión siempre será C{IntegerType}.
"""
def _get_integer(self):
"""
Método para obtener el valor de la propiedad C{integer}.
"""
return self._integer
integer = property(_get_integer)
def __init__(self, integer):
"""
Inicializa la clase C{IntegerLiteralExpressionNode}.
@type integer: C{int}
@param integer: Valor del número entero literal.
"""
super(IntegerLiteralExpressionNode, self).__init__()
self._integer = integer
def check_semantics(self, scope, errors):
"""
Para obtener información acerca de los parámetros recibidos por
el método consulte la documentación del método C{check_semantics}
en la clase C{LanguageNode}.
Este nodo del árbol de sintáxis abstracta no requiere comprobación
semántica, solamente se da valor al tipo de retorno del nodo que
siempre será C{IntegerType}.
"""
self._scope = scope
self._return_type = IntegerType()
def generate_code(self, generator):
"""
Genera el código correspondiente a la estructura del lenguaje Tiger
representada por el nodo.
Para obtener información acerca de los parámetros recibidos por
este método consulte la documentación del método C{generate_code}
de la clase C{LanguageNode}.
"""
self.scope.generate_code(generator)
int_code_type = IntegerType().code_type
local_var = generator.define_local(int_code_type)
generator.add_statement('{0} = {1};'.format(local_var, self.integer))
self._code_name = local_var
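# Illustrative usage sketch (not part of the original module): checking a
# literal never reports errors and always yields IntegerType. `scope` would
# be a pytiger2c scope object; `_return_type` is an internal attribute.
def _example_check_semantics(scope):
    node = IntegerLiteralExpressionNode(5)
    errors = []
    node.check_semantics(scope, errors)
    assert not errors and isinstance(node._return_type, IntegerType)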
| mit | 7,899,778,051,832,931,000 | 33.587302 | 77 | 0.652134 | false |
jgmanzanas/CMNT_004_15 | project-addons/sale_display_stock/report/sale_order_line_report.py | 1 | 4447 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea All Rights Reserved
# $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, tools
class sale_order_line_report(models.Model):
_name = 'sale.order.line.report'
_auto = False
name = fields.Char('Name', readonly=True)
partner_id = fields.Many2one('res.partner', 'Partner', readonly=True)
product_qty = fields.Float('Quantity', readonly=True)
uom = fields.Many2one('product.uom', 'UoM', readonly=True)
price_unit = fields.Float('Price unit', readonly=True)
discount = fields.Float('Discount', readonly=True)
salesman_id = fields.Many2one('res.users', 'Salesperson', readonly=True)
state = fields.Char('State', readonly=True)
product_id = fields.Many2one('product.product', 'Product', readonly=True)
order_id = fields.Many2one('sale.order', 'Order', readonly=True)
qty_kitchen = fields.Float('Qty in kitchen', group_operator="avg",
readonly=True)
qty_stock = fields.Float('Stock qty', group_operator="avg", readonly=True)
company_id = fields.Many2one("res.company", "Company", readonly=True)
def init(self, cr):
tools.drop_view_if_exists(cr, self._table)
cr.execute("""
CREATE or REPLACE VIEW sale_order_line_report as (SELECT sol.id as id,
sol.name as name,
sol.order_partner_id as partner_id,
sol.product_uom_qty as product_qty,
sol.product_uom as uom,
sol.price_unit as price_unit,
sol.discount as discount,
sol.salesman_id as salesman_id,
sol.state as state,
sol.order_id as order_id,
sol.company_id as company_id,
q_kt.product_id,
q_kt.qty AS qty_kitchen,
stck.qty AS qty_stock
FROM sale_order_line sol
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT res_id
FROM ir_model_data
WHERE module = 'location_moves' AND name IN ('stock_location_kitchen','stock_location_pantry')
)
GROUP BY product_id) q_kt
ON sol.product_id = q_kt.product_id
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT loc.id
FROM stock_location loc
INNER JOIN (SELECT parent_left,
parent_right
FROM stock_location
WHERE
id IN (select view_location_id from stock_warehouse))
stock
ON loc.parent_left >=
stock.parent_left
AND loc.parent_right <=
stock.parent_right)
GROUP BY product_id) stck
ON sol.product_id = stck.product_id
WHERE q_kt.qty > 0 and sol.id in (select sale_line_id from procurement_order po where po.state not in ('done', 'cancel'))
GROUP BY sol.id, sol.name, sol.order_partner_id, sol.product_uom_qty,
sol.product_uom, sol.price_unit, sol.discount, sol.company_id,
sol.salesman_id, sol.state, sol.order_id, q_kt.product_id, q_kt.qty, stck.qty)
""")
| agpl-3.0 | 516,351,656,280,464,100 | 47.326087 | 137 | 0.550157 | false |
gevannmullins/linux_server | add_items.py | 1 | 4498 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Item, User
engine = create_engine('postgresql://catalog:password@localhost/catalog')
# engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(name="Caron Mullins", email="[email protected]", picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png')
session.add(User1)
session.commit()
# Category 1 with 3 items
category1 = Category(user_id=1, name="Soccer", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category1)
session.commit()
item1 = Item(user_id=1, name="Soccer Ball", description="Soccer balls for practicing and match games.", category=category1)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Soccer Boots", description="Soccer boots to maxumise gameplay", category=category1)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Whistles", description="Whistles for training sessions.", category=category1)
session.add(item3)
session.commit()
# Next Categories and its items
category2 = Category(user_id=1, name="Basketball", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category2)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category2.id)
session.add(item1)
session.commit()
# Categories 3
category3 = Category(user_id=1, name="Baseball", image="http://totalsportscomplex.com/wp-content/uploads/2014/09/baseball-pic.jpg")
session.add(category3)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category3.id)
session.add(item1)
session.commit()
# Categories 4
category4 = Category(user_id=1, name="Frisbee", image="http://uvmbored.com/wp-content/uploads/2015/10/how_the_frisbee_took_flight.jpg")
session.add(category4)
session.commit()
item1 = Item(user_id=1, name="Flying Disc", description="A Flying disc or a Flying Saucer", category_id = category4.id)
session.add(item1)
session.commit()
# Categories 5
category5 = Category(user_id=1, name="Snowboarding", image="https://pantherfile.uwm.edu/collins9/www/finalproject5/Project_5/snowboarding3.jpg")
session.add(category5)
session.commit()
item1 = Item(user_id=1, name="Snowboard", description="Wooden board suitable to glide on snow", category_id = category5.id)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Goggles", description="Anit-glare protective safety glasses",category_id = category5.id)
session.add(item2)
session.commit()
# Categories 6
category6 = Category(user_id=1, name="Rock Climbing", image="http://asme.berkeley.edu/wordpress/wp-content/uploads/2013/11/Rock-Climbing-Wallpaper-HD.jpg")
session.add(category6)
session.commit()
item1 = Item(user_id=1, name="Shoes", description="Superior performance shoew wtih excellent grip", category_id = category6.id)
session.add(item1)
session.commit()
# Categories 7
category7 = Category(user_id=1, name="Skating", image="http://www.ocasia.org/Images-OCA/During-the-Roller-Skating-XXX-contest-between-XXX-_53834132011574.jpg")
session.add(category7)
session.commit()
item1 = Item(user_id=1, name="Skates", description="Roller skates with bearing suitable for beginner and advanced skater", category_id = category7.id)
session.add(item1)
session.commit()
# Categories 8
category8 = Category(user_id=1, name="Hockey", image="http://www.picture-newsletter.com/street-hockey/street-hockey-39.jpg")
session.add(category8)
session.commit()
item1 = Item(user_id=1, name="Stick", description="Composite Stick favorable for both ice and street hockey", category_id = category8.id)
session.add(item1)
session.commit()
print "added menu items!"
| mit | 530,226,847,194,362,200 | 32.318519 | 174 | 0.765229 | false |
alphagov/backdrop | tests/read/test_parse_request_args.py | 1 | 5035 | from datetime import datetime
import re
import unittest
from hamcrest import assert_that, is_, has_item
import pytz
from werkzeug.datastructures import MultiDict
from backdrop.read.query import parse_request_args
class Test_parse_request_args(unittest.TestCase):
def test_start_at_is_parsed(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_start_at_is_used(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00"),
("start_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_end_at_is_parsed(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_end_at_is_used(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00"),
("end_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_one_filter_by_is_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar")])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
def test_many_filter_by_are_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar"),
("filter_by", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
assert_that(args['filter_by'], has_item(["bar", "foo"]))
def test_build_query_with_boolean_value(self):
request_args = MultiDict([
("filter_by", "planet:true"),
("filter_by", "star:false"),
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item([ "planet", True ]))
assert_that(args['filter_by'], has_item([ "star", False ]))
def test_one_filter_by_prefix_is_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:/hello/world")])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'],
has_item(["foo", '/hello/world']))
    def test_many_filter_by_prefix_are_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:bar"),
("filter_by_prefix", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'], has_item(["foo", 'bar']))
assert_that(args['filter_by_prefix'], has_item(["bar", 'foo']))
def test_group_by_is_passed_through_untouched(self):
request_args = MultiDict([("group_by", "foobar")])
args = parse_request_args(request_args)
assert_that(args['group_by'], is_(['foobar']))
def test_sort_is_parsed(self):
request_args = MultiDict([
("sort_by", "foo:ascending")])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "ascending"]))
def test_sort_will_use_first_argument_only(self):
request_args = MultiDict([
("sort_by", "foo:descending"),
("sort_by", "foo:ascending"),
])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "descending"]))
def test_limit_is_parsed(self):
request_args = MultiDict([
("limit", "123")
])
args = parse_request_args(request_args)
assert_that(args['limit'], is_(123))
def test_one_collect_is_parsed_with_default_method(self):
request_args = MultiDict([
("collect", "some_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default")]))
def test_two_collects_are_parsed_with_default_methods(self):
request_args = MultiDict([
("collect", "some_key"),
("collect", "some_other_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default"),
("some_other_key", "default")]))
def test_one_collect_is_parsed_with_custom_method(self):
request_args = MultiDict([
("collect", "some_key:mean")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "mean")]))
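    # Illustrative direct use (not part of the original suite), consistent
    # with the behaviour asserted above: limit is coerced to int and
    # group_by is wrapped in a list.
    def _example_parse(self):
        args = parse_request_args(
            MultiDict([("limit", "10"), ("group_by", "foo")]))
        return args['limit'], args['group_by']  # (10, ['foo'])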
| mit | -5,928,915,904,435,119,000 | 29.70122 | 74 | 0.555511 | false |
imgrant/fit2tcx | fit2tcx.py | 1 | 40690 | #!/usr/bin/env python
#
# fit2tcx - convert a FIT file to a TCX file
#
# Copyright (c) 2012, Gustav Tiger <[email protected]> [https://github.com/Tigge/FIT-to-TCX/]
# Copyright (c) 2014-2016, Ian Grant <[email protected]> [https://github.com/imgrant/fit2tcx]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__version__ = "1.6"
import sys
import copy
import contextlib
import argparse
import lxml.etree
from datetime import datetime, timedelta
from pytz import timezone, utc
from tzwhere import tzwhere
from geopy.distance import GreatCircleDistance
from fitparse import FitFile, FitParseError
"""
Limit values for error checking on speed & distance calculations
"""
# Speed and distance calculated from GPS will be ignored
# for trackpoints where the acceleration from the last
# point is above this threshold (in m/s^2)
MAX_ACCELERATION = 3.0
"""
FIT to TCX values mapping
"""
LAP_TRIGGER_MAP = {
"manual": "Manual",
"time": "Time",
"distance": "Distance",
"position_start": "Location",
"position_lap": "Location",
"position_waypoint": "Location",
"position_marked": "Location",
"session_end": "Manual",
"fitness_equipment": "Manual"}
INTENSITY_MAP = {
"active": "Active",
"warmup": "Active",
"cooldown": "Active",
"rest": "Resting",
None: "Active"}
PRODUCT_MAP = {
0: "Unknown",
255: "Run Trainer 2.0", # Timex
# Garmin products:
1: "Garmin Connect API", # Also HRM1
2: "AXH01",
2: "AXH01",
4: "AXB02",
5: "HRM2SS",
6: "DSI_ALF02",
473: "Forerunner 301",
474: "Forerunner 301",
475: "Forerunner 301",
494: "Forerunner 301",
717: "Forerunner 405",
987: "Forerunner 405",
782: "Forerunner 50",
988: "Forerunner 60",
1011: "DSI_ALF01",
1018: "Forerunner 310XT",
1446: "Forerunner 310XT",
1036: "Edge 500",
1199: "Edge 500",
1213: "Edge 500",
1387: "Edge 500",
1422: "Edge 500",
1124: "Forerunner 110",
1274: "Forerunner 110",
1169: "Edge 800",
1333: "Edge 800",
1334: "Edge 800",
1497: "Edge 800",
1386: "Edge 800",
1253: "Chirp",
1325: "Edge 200",
1555: "Edge 200",
1328: "Forerunner 910XT",
1537: "Forerunner 910XT",
1600: "Forerunner 910XT",
1664: "Forerunner 910XT",
1765: "Forerunner 920XT",
1341: "ALF04",
1345: "Forerunner 610",
1410: "Forerunner 610",
1360: "Forerunner 210",
1436: "Forerunner 70",
1461: "AMX",
1482: "Forerunner 10",
1688: "Forerunner 10",
1499: "Swim",
1551: "Fenix",
1967: "Fenix 2",
1561: "Edge 510",
1742: "Edge 510",
1821: "Edge 510",
1567: "Edge 810",
1721: "Edge 810",
1822: "Edge 810",
1823: "Edge 810",
1836: "Edge 1000",
1570: "Tempe",
1735: "VIRB Elite",
1736: "Edge Touring",
1752: "HRM Run",
10007: "SDM4",
20119: "Training Center",
1623: "Forerunner 620",
2431: "Forerunner 235"}
"""
TCX schema and namespace values
"""
TCD_NAMESPACE = "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"
TCD = "{%s}" % TCD_NAMESPACE
XML_SCHEMA_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
XML_SCHEMA = "{%s}" % XML_SCHEMA_NAMESPACE
SCHEMA_LOCATION = \
"http://www.garmin.com/xmlschemas/ActivityExtension/v2 " + \
"http://www.garmin.com/xmlschemas/ActivityExtensionv2.xsd " + \
"http://www.garmin.com/xmlschemas/FatCalories/v1 " + \
"http://www.garmin.com/xmlschemas/fatcalorieextensionv1.xsd " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd"
NSMAP = {
None: TCD_NAMESPACE,
"xsi": XML_SCHEMA_NAMESPACE}
# Class and context manager to suppress stdout for use with tzwhere.
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile()
yield
sys.stdout = save_stdout
class MyDataProcessor(object):
"""
Custom units data processor for FIT object
"""
def process_type_bool(self, field_data):
if field_data.value is not None:
field_data.value = bool(field_data.value)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None
def process_units_semicircles(self, field_data):
if field_data.value is not None:
field_data.value *= 180.0 / (2**31)
field_data.units = 'deg'
class TZDataProcessor(MyDataProcessor):
"""
Extra data processor layer for working with timezones.
For the Timex Run Trainer 2.0, date-times claim to be UTC (as per the FIT
format spec), but are actually an (unknown) local timezone.
If the data processor is called with a lat,lon point, we look up the true
timezone and re-normalize date-times to UTC.
Otherwise, if the data processor is called with a timezone name (defaults
to UTC, i.e. no difference), we use that and re-normalize.
"""
def __init__(self, lat=None, lon=None, tzname="UTC"):
if lat is not None and lon is not None:
with nostdout():
w = tzwhere.tzwhere()
self.tz = timezone(w.tzNameAt(lat, lon))
else:
self.tz = timezone(tzname)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
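# Worked example (illustrative): a Timex FIT stamp of 12:00 labelled UTC
# that is really Europe/London in summer is localized to 12:00+01:00 and
# then normalized by the processor above to 11:00 UTC.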
def iso_Z_format(dt):
iso = dt.isoformat()
z_iso = iso.replace("+00:00", "Z")
return z_iso
def sum_distance(activity,
start_time=datetime(1899, 1, 1, 0, 0, 1, tzinfo=utc),
end_time=datetime(2189, 12, 31, 23, 59, 59, tzinfo=utc)):
"""
Calculate distance from GPS data for an activity
"""
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
fit_epoch = datetime(1989, 12, 31, 0, 0, 0, tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'distance': None,
'position_lat': None,
'position_long': None}
for var in ['distance',
'position_lat',
'position_long']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# For mid-activity laps, iterate through trackpoints to
# grab the first point before the start of the lap, also
# delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then loop over tps array to calculate cumulative point-to-point
# distance from GPS data. Existing distance data (e.g. from footpod)
# is used when there is no GPS position available or it is bad.
distance = 0.0
for timestamp in sorted(tps):
tp = tps[timestamp]
if prev is not None:
if prev['distance'] is None:
prev_dist = 0
else:
prev_dist = prev['distance']
if not None in (tp['position_lat'],
tp['position_long'],
prev['position_lat'],
prev['position_long']):
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_dist = tp['distance'] - prev_dist
except:
# Fallback to existing distance stream data on error
gps_dist = tp['distance'] - prev_dist
else:
# Fallback to existing distance stream data if no GPS coords
gps_dist = tp['distance'] - prev_dist
distance += gps_dist
prev = tp
return distance
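# Worked example (illustrative): two fixes 10 m apart sampled 2 s apart give
# gps_speed = 5 m/s; the acceleration proxy 5 / 2 = 2.5 m/s^2 is below
# MAX_ACCELERATION (3.0), so the GPS-derived distance is accepted. A proxy
# above the threshold falls back to the recorded distance stream instead.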
def create_element(tag, text=None, namespace=None):
"""Create a free element"""
namespace = NSMAP[namespace]
tag = "{%s}%s" % (namespace, tag)
element = lxml.etree.Element(tag, nsmap=NSMAP)
if text is not None:
element.text = text
return element
def create_sub_element(parent, tag, text=None, namespace=None):
"""Create an element as a child of an existing given element"""
element = create_element(tag, text, namespace)
parent.append(element)
return element
def create_document():
"""Create a TCX XML document"""
document = create_element("TrainingCenterDatabase")
document.set(XML_SCHEMA + "schemaLocation", SCHEMA_LOCATION)
document = lxml.etree.ElementTree(document)
return document
def add_author(document):
"""Add author element (TCX writer) to TCX"""
author = create_sub_element(document.getroot(), "Author")
author.set(XML_SCHEMA + "type", "Application_t")
create_sub_element(author, "Name", "fit2tcx Converter")
build = create_sub_element(author, "Build")
version = create_sub_element(build, "Version")
vMajor, vMinor = tuple(map(int, (__version__.split("."))))
create_sub_element(version, "VersionMajor", str(vMajor))
create_sub_element(version, "VersionMinor", str(vMinor))
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
create_sub_element(author, "LangID", "en")
create_sub_element(author, "PartNumber", "000-00000-00")
def add_creator(element, manufacturer, product_name, product_id, serial):
"""Add creator element (recording device) to TCX activity"""
creator = create_sub_element(element, "Creator")
creator.set(XML_SCHEMA + "type", "Device_t")
create_sub_element(creator, "Name", manufacturer + " " + product_name)
unitID = int(serial or 0)
create_sub_element(creator, "UnitId", str(unitID))
# Set ProductID to 0 for non-Garmin devices
if manufacturer != "Garmin":
product_id = 0
create_sub_element(creator, "ProductID", str(product_id))
version = create_sub_element(creator, "Version")
create_sub_element(version, "VersionMajor", "0")
create_sub_element(version, "VersionMinor", "0")
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
def add_notes(element, text):
"""Add notes element to TCX activity"""
create_sub_element(element, "Notes", text)
def add_trackpoint(element, trackpoint, sport):
"""Create a trackpoint element"""
timestamp = trackpoint['timestamp']
pos_lat = trackpoint['position_lat']
pos_long = trackpoint['position_long']
distance = trackpoint['distance']
altitude = trackpoint['altitude']
speed = trackpoint['speed']
heart_rate = trackpoint['heart_rate']
cadence = trackpoint['cadence']
create_sub_element(element, "Time", iso_Z_format(timestamp))
if pos_lat is not None and pos_long is not None:
pos = create_sub_element(element, "Position")
create_sub_element(pos, "LatitudeDegrees", "{:.6f}".format(pos_lat))
create_sub_element(pos, "LongitudeDegrees", "{:.6f}".format(pos_long))
if altitude is not None:
create_sub_element(element, "AltitudeMeters", str(altitude))
if distance is not None:
create_sub_element(element, "DistanceMeters", str(distance))
if heart_rate is not None:
heartrateelem = create_sub_element(element, "HeartRateBpm")
heartrateelem.set(XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str(heart_rate))
if speed is not None or cadence is not None:
if cadence is not None and sport == "Biking":
# Bike cadence is stored in main trackpoint element,
# not an extension, unlike running cadence (below)
create_sub_element(element, "Cadence", str(cadence))
exelem = create_sub_element(element, "Extensions")
tpx = create_sub_element(exelem, "TPX")
tpx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if speed is not None:
create_sub_element(tpx, "Speed", str(speed))
if cadence is not None:
if sport == "Running":
tpx.set("CadenceSensor", "Footpod")
create_sub_element(tpx, "RunCadence", str(cadence))
elif sport == "Biking":
tpx.set("CadenceSensor", "Bike")
def add_lap(element,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_distance,
activity_scaling_factor,
total_cumulative_distance):
"""Add a lap element to a TCX document"""
# Only process laps with timestamps - this serves as a workaround for
# extra fake/empty laps in FIT files from the Timex Run Trainer 2.0
if lap.get_value('timestamp') is not None:
lap_num = lap.get_value("message_index") + 1
start_time = lap.get_value("start_time")
end_time = lap.get_value("timestamp")
totaltime = lap.get_value("total_elapsed_time")
stored_distance = lap.get_value("total_distance")
calculated_distance = sum_distance(activity, start_time, end_time)
if fixed_distance is not None:
reference_distance = fixed_distance
else:
reference_distance = calculated_distance
try:
lap_scaling_factor = reference_distance / stored_distance
except ZeroDivisionError:
lap_scaling_factor = 1.00
if calibrate and per_lap_cal:
scaling_factor = lap_scaling_factor
else:
scaling_factor = activity_scaling_factor
max_speed = lap.get_value("max_speed")
avg_speed = lap.get_value("avg_speed")
calories = lap.get_value("total_calories")
avg_heart = lap.get_value("avg_heart_rate")
max_heart = lap.get_value("max_heart_rate")
intensity = INTENSITY_MAP[lap.get_value("intensity")]
avg_cadence = lap.get_value("avg_cadence")
max_cadence = lap.get_value("max_cadence")
if lap.get_value("lap_trigger"):
triggermet = LAP_TRIGGER_MAP[lap.get_value("lap_trigger")]
else:
triggermet = LAP_TRIGGER_MAP["manual"]
lapelem = create_sub_element(element, "Lap")
lapelem.set("StartTime", iso_Z_format(start_time))
#
# TotalTimeSeconds
#
create_sub_element(lapelem, "TotalTimeSeconds", str("%d" % totaltime))
#
# DistanceMeters
#
lap_dist_elem = create_sub_element(lapelem,
"DistanceMeters",
str("%d" % stored_distance)
)
#
# MaximumSpeed
#
lap_max_spd_elem = create_sub_element(lapelem,
"MaximumSpeed",
str("%.3f" % max_speed))
#
# Calories
#
create_sub_element(lapelem, "Calories", str("%d" % calories))
#
# AverageHeartRateBpm
#
if avg_heart is not None:
heartrateelem = create_sub_element(lapelem, "AverageHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % avg_heart))
#
# MaximumHeartRateBpm
#
if max_heart is not None:
heartrateelem = create_sub_element(lapelem, "MaximumHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % max_heart))
#
# Intensity
#
create_sub_element(lapelem, "Intensity", intensity)
#
# Cadence (bike)
#
if avg_speed or avg_cadence or max_cadence:
if sport == "Biking" and avg_cadence is not None:
# Average bike cadence is stored in main lap element,
# not as an extension, unlike average running cadence (below)
create_sub_element(lapelem, "Cadence", str("%d" % avg_cadence))
#
# TriggerMethod
#
create_sub_element(lapelem, "TriggerMethod", triggermet)
if dist_recalc:
distance_used = calculated_distance
elif calibrate:
if fixed_distance is not None:
distance_used = fixed_distance
else:
distance_used = stored_distance * scaling_factor
else:
distance_used = stored_distance
#
# Track
#
trackelem = create_sub_element(lapelem, "Track")
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
fit_epoch = datetime(1989, 12, 31).replace(tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'cadence': None,
'distance': None,
'position_lat': None,
'position_long': None,
'heart_rate': None,
'altitude': None,
'speed': None}
for var in ['cadence',
'distance',
'position_lat',
'position_long',
'heart_rate',
'altitude',
'speed']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# Iterate through all trackpoints to grab the first point before the
# start of the lap, then delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then process all trackpoints for this lap, recalculating speed &
# distance from GPS and adjusting if requested, before adding element
stored_avg_speed = copy.copy(avg_speed)
stored_max_speed = copy.copy(max_speed)
distance = 0.0
max_speed = 0.0
tp_speed = None
for timestamp in sorted(tps):
tp = tps[timestamp]
trackpointelem = create_sub_element(trackelem, "Trackpoint")
if prev is not None:
if prev['distance'] is None:
prev['distance'] = 0
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
except:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
if dist_recalc:
tp_dist = gps_dist
elif calibrate:
tp_dist = (
tp['distance'] - prev['distance']) * scaling_factor
else:
tp_dist = tp['distance'] - prev['distance']
try:
if speed_recalc:
tp_speed = gps_speed
elif calibrate:
tp_speed = tp['speed'] * scaling_factor
else:
tp_speed = tp['speed']
total_cumulative_distance += tp_dist
distance += tp_dist
if tp_speed > max_speed:
max_speed = tp_speed
except TypeError:
tp_speed = None
# Store previous trackpoint before changing the current one
prev = copy.copy(tp)
# Adjust trackpoint distance & speed values if requested
if ((dist_recalc or calibrate)
and tp['distance'] is not None
and total_cumulative_distance is not None):
tp['distance'] = "{:.1f}".format(total_cumulative_distance)
if ((speed_recalc or calibrate)
and tp['speed'] is not None
and tp_speed is not None):
tp['speed'] = "{:.3f}".format(tp_speed)
# Add trackpoint element
add_trackpoint(trackpointelem, tp, sport)
#
# Notes
#
if fixed_distance is not None:
precision_str = ("; known distance: {ref_dist:.3f} km "
"(FIT precision: {fit_precision:.1f}%; "
"GPS/footpod precision: {gps_precision:.1f}%)")
reference = "known distance"
else:
precision_str = " (precision: {precision:.1f}%)"
reference = "GPS/footpod"
try:
fit_precision_calc = (1 - (abs(reference_distance -
stored_distance) /
reference_distance)) * 100
gps_precision_calc = (1 - (abs(reference_distance -
calculated_distance) /
reference_distance)) * 100
precision_calc = (1 - (abs(calculated_distance -
stored_distance) /
calculated_distance)) * 100
except ZeroDivisionError:
fit_precision_calc = 100
gps_precision_calc = 100
precision_calc = 100
notes = ("Lap {lap_number:d}: {distance_used:.3f} km in {total_time!s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km"
+ precision_str + "\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on {reference} for this lap: {new_cf:.1f}%"
).format(lap_number=lap_num,
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(totaltime)),
fit_dist=stored_distance / 1000,
gps_dist=calculated_distance / 1000,
ref_dist=reference_distance / 1000,
fit_precision=fit_precision_calc,
gps_precision=gps_precision_calc,
precision=precision_calc,
old_cf=current_cal_factor,
reference=reference,
new_cf=lap_scaling_factor * current_cal_factor)
add_notes(lapelem, notes)
#
# Extensions (AvgSpeed, AvgRunCadence, MaxRunCadence, MaxBikeCadence)
#
if not all(var is None for var in (avg_speed, avg_cadence, max_cadence)):
exelem = create_sub_element(lapelem, "Extensions")
lx = create_sub_element(exelem, "LX")
lx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if avg_speed is not None:
lap_avg_spd_elem = create_sub_element(lx,
"AvgSpeed",
str("%.3f" % avg_speed))
if avg_cadence is not None and sport == "Running":
create_sub_element(lx,
"AvgRunCadence",
str("%d" % avg_cadence))
if max_cadence is not None:
if sport == "Running":
create_sub_element(lx,
"MaxRunCadence",
str("%d" % max_cadence))
elif sport == "Biking":
create_sub_element(lx,
"MaxBikeCadence",
str("%d" % max_cadence))
# Adjust overall lap distance & speed values if required
if calibrate:
# Manual distance:
if fixed_distance is not None:
lap_dist_elem.text = "{:d}".format(int(fixed_distance))
lap_avg_spd_elem.text = "{:.3f}".format(
fixed_distance / totaltime)
else:
lap_dist_elem.text = "{:d}".format(
int(stored_distance * scaling_factor))
lap_avg_spd_elem.text = "{:.3f}".format(
stored_avg_speed * scaling_factor)
lap_max_spd_elem.text = "{:.3f}".format(
stored_max_speed * scaling_factor)
# GPS recalculation options override calibration:
if dist_recalc:
lap_dist_elem.text = "{:d}".format(int(distance))
if speed_recalc:
lap_avg_spd_elem.text = "{:.3f}".format(distance / totaltime)
lap_max_spd_elem.text = "{:.3f}".format(max_speed)
return distance
else:
return 0
def add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor):
"""Add an activity to a TCX document"""
# Sport type
sport = session.get_value("sport")
sport_mapping = {"running": "Running", "cycling": "Biking"}
sport = sport_mapping[sport] if sport in sport_mapping else "Other"
actelem = create_sub_element(element, "Activity")
actelem.set("Sport", sport)
create_sub_element(actelem,
"Id",
iso_Z_format(session.get_value("start_time")))
total_cumulative_distance = 0.0
lap_num = 0
for lap in activity.get_messages('lap'):
if lap.get_value("start_time") == lap.get_value("timestamp"):
continue # skip very short laps that won't have any data
if manual_lap_distance is not None:
try:
fixed_dist = manual_lap_distance[lap_num]
except IndexError:
fixed_dist = None
else:
fixed_dist = None
lap_dist = add_lap(actelem,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_dist,
activity_scaling_factor,
total_cumulative_distance)
total_cumulative_distance += lap_dist
lap_num += 1
return (actelem, total_cumulative_distance)
def convert(filename,
time_zone="auto",
dist_recalc=False,
speed_recalc=False,
calibrate=False,
per_lap_cal=False,
manual_lap_distance=None,
current_cal_factor=100.0):
"""Convert a FIT file to TCX format"""
# Calibration requires either GPS recalculation or manual lap distance(s):
if calibrate and not dist_recalc and manual_lap_distance is None:
sys.stderr.write("Calibration requested, enabling distance recalculation from GPS/footpod.\n")
dist_recalc = True
# Calibration with manual lap distances implies
# per-lap calibration:
if calibrate and manual_lap_distance is not None:
per_lap_cal = True
document = create_document()
element = create_sub_element(document.getroot(), "Activities")
try:
if time_zone == "auto":
# We need activity object to be able to get trackpoints,
# before re-creating activity again with timezone info
activity = FitFile(filename,
check_crc=False,
data_processor=MyDataProcessor())
activity.parse()
lat = None
lon = None
for trackpoint in activity.get_messages('record'):
if lat is not None and lon is not None:
break
lat = trackpoint.get_value("position_lat")
lon = trackpoint.get_value("position_long")
if lat is not None and lon is not None:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(lat=lat,
lon=lon))
else:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(tzname=time_zone))
activity.parse()
session = next(activity.get_messages('session'))
total_activity_distance = session.get_value('total_distance')
total_calculated_distance = sum_distance(activity)
activity_scaling_factor = (total_calculated_distance /
total_activity_distance)
new_cal_factor = activity_scaling_factor * current_cal_factor
actelem, total_distance = add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor)
except FitParseError as e:
sys.stderr.write(str("Error while parsing .FIT file: %s" % e) + "\n")
sys.exit(1)
if dist_recalc:
distance_used = total_calculated_distance
elif calibrate:
distance_used = total_distance
else:
distance_used = total_activity_distance
method = ""
if dist_recalc or speed_recalc or calibrate:
parts = []
if calibrate:
if per_lap_cal:
parts.append("calibration applied per lap")
else:
parts.append("calibration applied")
if dist_recalc and speed_recalc:
parts.append("speed and distance recalculated")
elif dist_recalc:
parts.append("distance recalculated")
elif speed_recalc:
parts.append("speed recalculated")
if calibrate and manual_lap_distance is not None:
reference = " from known distance (with GPS fill-in)"
elif dist_recalc or speed_recalc:
reference = " from GPS/footpod"
method = "(" + ", ".join(parts) + reference + ")"
notes = ("{total_laps:d} laps: {distance_used:.3f} km in {total_time!s} {dist_method:s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km "
"(precision: {precision:.1f}%)\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on recomputed distance: {new_cf:.1f}%"
).format(total_laps=session.get_value('num_laps'),
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(session.get_value(
'total_timer_time'))),
fit_dist=total_activity_distance / 1000,
gps_dist=total_calculated_distance / 1000,
precision=(1 - (abs(total_calculated_distance -
total_activity_distance) /
total_calculated_distance)) * 100,
old_cf=current_cal_factor,
new_cf=new_cal_factor,
dist_method=method)
add_notes(actelem, notes)
try:
dinfo = next(activity.get_messages('device_info'))
manufacturer = dinfo.get_value('manufacturer').title().replace('_', ' ')
product_name = dinfo.get_value('descriptor').replace('_', ' ')
product_id = dinfo.get_value('product')
serial_number = dinfo.get_value('serial_number')
except: # if no device_info message, StopIteration is thrown
fid = next(activity.get_messages('file_id'))
manufacturer = fid.get_value('manufacturer').title().replace('_', ' ')
product_id = fid.get_value('product')
product_name = PRODUCT_MAP[product_id] if product_id in PRODUCT_MAP else product_id
serial_number = fid.get_value('serial_number')
add_creator(actelem,
manufacturer,
product_name,
product_id,
serial_number
)
add_author(document)
return document
def main():
"""Read arguments from command line to convert FIT file to TCX"""
parser = argparse.ArgumentParser(prog="fit2tcx")
parser.add_argument("FitFile", help="Input FIT file")
parser.add_argument("TcxFile", help="Output TCX file")
parser.add_argument(
"-v",
"--version",
action='version',
version='%(prog)s {version}'.format(version=__version__))
parser.add_argument(
"-z",
"--timezone",
action="store",
type=str,
default="auto",
help="Specify the timezone for FIT file timestamps (default, 'auto', uses GPS data to lookup the local timezone)")
parser.add_argument(
"-d",
"--recalculate-distance-from-gps",
action="store_true",
help="Recalculate distance from GPS data")
parser.add_argument(
"-s",
"--recalculate-speed-from-gps",
action="store_true",
help="Recalculate speed from GPS data")
parser.add_argument(
"-c",
"--calibrate-footpod",
action="store_true",
help="Use GPS-measured and/or known distance to calibrate footpod data")
parser.add_argument(
"-p",
"--per-lap-calibration",
action="store_true",
help="Apply footpod calibration on a per lap basis")
parser.add_argument(
"-l",
"--manual-lap-distance",
action="append",
default=None,
type=float,
help="Manually specify known lap distance(s) (in metres, use calibration to apply)")
parser.add_argument(
"-f",
"--calibration-factor",
action="store",
default=100.0,
type=float,
help="Existing calibration factor (defaults to 100.0)")
args = parser.parse_args()
if (args.calibrate_footpod and
not args.recalculate_distance_from_gps and
not args.manual_lap_distance):
parser.error("-c (--calibrate-footpod) requires either -d (--recalculate-distance-from-gps) or -l (--manual-lap-distance)")
return 1
try:
document = convert(args.FitFile,
args.timezone,
args.recalculate_distance_from_gps,
args.recalculate_speed_from_gps,
args.calibrate_footpod,
args.per_lap_calibration,
args.manual_lap_distance,
args.calibration_factor)
activity_notes = document.getroot().findtext(".//{*}Activity/{*}Notes")
if activity_notes is not None:
sys.stdout.write(str(activity_notes) + "\n")
        with open(args.TcxFile, 'wb') as tcx:
            tcx.write(lxml.etree.tostring(document.getroot(),
                                          pretty_print=True,
                                          xml_declaration=True,
                                          encoding="UTF-8"))
return 0
except FitParseError as exception:
sys.stderr.write(str(exception) + "\n")
return 1
if __name__ == "__main__":
sys.exit(main())
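# Example invocations (illustrative file names; flags as defined in main() above):
#   fit2tcx activity.fit activity.tcx
#   fit2tcx -d -s activity.fit activity.tcx          # recalculate distance and speed from GPS
#   fit2tcx -c -l 400.0 -f 98.5 track.fit track.tcx  # calibrate footpod against a known 400 m lap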
| mit | 1,875,265,608,820,062,500 | 36.641073 | 131 | 0.541239 | false |
gkabbe/cMDLMC | mdlmc/IO/converters.py | 1 | 1764 | # coding=utf-8
import logging
import os
import pathlib
import tables  # noqa: F401 -- imported for its side effect: makes the Blosc filter (32001) available to h5py
import h5py
import daiquiri
import fire
import numpy as np
from typing import Union, Iterable
from ..atoms.numpy_atom import dtype_xyz
from ..atoms import numpy_atom as npa
from ..IO.trajectory_parser import XYZTrajectory
logger = logging.getLogger(__name__)
def save_xyz_to_hdf5(xyz_fname, hdf5_fname=None, *, remove_com_movement=False,
dataset_name="trajectory", selection=None):
"""
Note: HDF5 with Blosc compression currently only works if h5py and pytables are installed via
conda!"""
xyz = XYZTrajectory(xyz_fname, selection=selection)
logger.info("Determine length of xyz trajectory.")
trajectory_length = len(xyz)
first_frame = next(iter(xyz))
frame_shape = first_frame.atom_positions.shape
atom_names = first_frame.atom_names.astype("S")
logger.info("Names: %s", atom_names)
if not hdf5_fname:
hdf5_fname = os.path.splitext(xyz_fname)[0] + ".hdf5"
with h5py.File(hdf5_fname, "w") as hdf5_file:
# Use blosc compression (needs tables import and code 32001)
        # "S2": fixed-width two-byte strings for element symbols
        traj_atomnames = hdf5_file.create_dataset("atom_names", atom_names.shape, dtype="S2")
traj_atomnames[:] = atom_names
traj = hdf5_file.create_dataset(dataset_name, shape=(trajectory_length, *frame_shape),
dtype=np.float32, compression=32001)
for i, xyz_frame in enumerate(xyz):
if remove_com_movement:
npa.remove_center_of_mass_movement(xyz_frame)
if i % 1000 == 0:
logger.info("Frame %i", i)
traj[i] = xyz_frame.atom_positions
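# Usage sketch (hypothetical file name): convert an XYZ trajectory to HDF5,
# removing centre-of-mass drift first:
#   save_xyz_to_hdf5("trajectory.xyz", remove_com_movement=True)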
def main():
daiquiri.setup(level=logging.INFO)
fire.Fire()
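# Reading the result back (sketch; dataset names as created in save_xyz_to_hdf5,
# file name hypothetical):
#   with h5py.File("trajectory.hdf5", "r") as f:
#       atom_names = f["atom_names"][:]
#       first_frame = f["trajectory"][0]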
| gpl-3.0 | -4,830,993,265,289,770,000 | 31.072727 | 97 | 0.652494 | false |
chromium2014/src | tools/perf/page_sets/intl_ko_th_vi.py | 1 | 1913 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlKoThViPage(page_module.Page):
def __init__(self, url, page_set):
super(IntlKoThViPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/intl_ko_th_vi.json'
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class IntlKoThViPageSet(page_set_module.PageSet):
""" Popular pages in Korean, Thai and Vietnamese. """
def __init__(self):
super(IntlKoThViPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/intl_ko_th_vi.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
# Why: #7 site in Vietnam
'http://us.24h.com.vn/',
# Why: #6 site in Vietnam
'http://vnexpress.net/',
# Why: #18 site in Vietnam
'http://vietnamnet.vn/',
# Why: #5 site in Vietnam
# pylint: disable=C0301
'http://news.zing.vn/the-gioi/ba-dam-thep-margaret-thatcher-qua-doi/a312895.html#home_noibat1',
'http://kenh14.vn/home.chn',
# Why: #5 site in Korea
'http://www.naver.com/',
# Why: #9 site in Korea
'http://www.daum.net/',
# Why: #25 site in Korea
'http://www.donga.com/',
'http://www.chosun.com/',
'http://www.danawa.com/',
# Why: #10 site in Thailand
'http://pantip.com/',
'http://thaimisc.com/'
]
for url in urls_list:
self.AddPage(IntlKoThViPage(url, self))
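# Quick sanity check (sketch; assumes telemetry's PageSet exposes a `pages`
# list populated by AddPage):
#   page_set = IntlKoThViPageSet()
#   urls = [page.url for page in page_set.pages]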
| bsd-3-clause | -6,543,040,283,645,747,000 | 31.423729 | 101 | 0.644015 | false |