code | repo_name | path | language | license | size |
---|---|---|---|---|---|
stringlengths 3 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int64 3 to 1.05M |
import shelve
import os
# Many more at:
# http://www.garykessler.net/library/file_sigs.html
# http://www.garykessler.net/library/magic.html
smudges = {
'jpg': {
'offset': 0,
'magic': '\xFF\xD8\xFF'
},
'png': {
'offset': 0,
'magic': '\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
},
'gif': {
'offset': 0,
'magic': '\x47\x49\x46\x38\x39\x61'
},
'pdf': {
'offset': 0,
'magic': '\x25\x50\x44\x46'
},
'exe': {
'offset': 0,
'magic': '\x4D\x5A'
},
'tar': {
'offset': 257,
'magic': '\x75\x73\x74\x61\x72\x20\x20\x00'
},
'3gp': {
'offset': 4,
'magic': '\x66\x74\x79\x70\x33\x67'
}
}
def populate_smudge_db():
db_path = os.path.join(
os.path.dirname(__file__), 'smudge')
db = shelve.open(db_path)
db.clear()
db.update(smudges)
db.close()
print('Smudge DB Populated')
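# A minimal usage sketch (an assumption, not part of the original module):
# one possible way to read the populated smudge DB back and guess a file's
# type from the magic bytes above. `detect_type` is a hypothetical helper;
# it assumes the Python 2 semantics of this module, where the magic values
# are byte strings that compare directly against data read in binary mode.
def detect_type(target_path):
    """Return the first matching type name for target_path, or None."""
    db_path = os.path.join(
        os.path.dirname(__file__), 'smudge')
    db = shelve.open(db_path)
    try:
        with open(target_path, 'rb') as handle:
            data = handle.read()
        # Compare each known signature against the bytes at its offset.
        for file_type, info in db.items():
            offset = info['offset']
            magic = info['magic']
            if data[offset:offset + len(magic)] == magic:
                return file_type
        return None
    finally:
        db.close()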
if __name__ == '__main__':
populate_smudge_db()
| leonjza/filesmudge | filesmudge/populate.py | Python | mit | 1,009 |
# -*- coding: utf-8 -*-
"""
ain7/news/models.py
"""
#
# Copyright © 2007-2018 AIn7 Devel Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.template import defaultfilters
from django.utils.translation import ugettext as _
from ain7.annuaire.models import Person
from ain7.utils import LoggedClass
class EventOrganizer(models.Model):
"""event organizer"""
event = models.ForeignKey('news.NewsItem', verbose_name=_('event'),
related_name='event_organizers')
organizer = models.ForeignKey(Person, verbose_name=_('organizer'),
related_name='organized_events')
send_email_for_new_subscriptions = models.BooleanField(default=False,
verbose_name=_('send email for new subscription'))
class RSVPAnswer(models.Model):
person = models.ForeignKey('annuaire.Person')
    event = models.ForeignKey('news.NewsItem', related_name='RSVPAnswers')
yes = models.BooleanField(default=False)
no = models.BooleanField(default=False)
maybe = models.BooleanField(default=False)
number = models.IntegerField(verbose_name=_('number of persons'), default=1)
payment = models.ForeignKey('shop.Payment', null=True, blank=True)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey('annuaire.Person', related_name='rsvpanswers_created')
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.ForeignKey('annuaire.Person', related_name='rsvpanswers_updated')
def answer(self):
if self.yes:
return _('yes')
if self.no:
return _('no')
if self.maybe:
return _('maybe')
class NewsItemManager(models.Manager):
"""news item manager"""
def next_events(self):
"""Returns all future events."""
return self.filter(date__gte=datetime.datetime.now())
class NewsItem(LoggedClass):
"""news item"""
STATUS = (
(0,_('project')),
(1,_('confirmed')),
(2,_('cancel')),
)
slug = models.SlugField(max_length=100, unique=True)
title = models.CharField(verbose_name=_('title'), max_length=100, unique=True)
body = models.TextField(verbose_name=_('body'))
shorttext = models.CharField(verbose_name=_('short text'), max_length=500,
blank=True, null=True)
image = models.ImageField(verbose_name=_('image'), upload_to='data',
blank=True, null=True)
creation_date = models.DateTimeField(verbose_name=_('date'),
default=datetime.datetime.today, editable=False)
front_page_presence = models.BooleanField(_('Front Page Presence'), default=True)
# to which group we should link this news
groups = models.ManyToManyField('groups.Group',
verbose_name=_('groups'), related_name='events',
blank=True)
# those fields are only present for an event
date = models.DateTimeField(verbose_name=_('date'), blank=True, null=True)
location = models.CharField(verbose_name=_('location'), max_length=60,
blank=True, null=True)
status = models.IntegerField(verbose_name=_('status'), choices=STATUS,
blank=True, null=True)
contact_email = models.EmailField(verbose_name=_('contact email'),
max_length=50, blank=True, null=True)
link = models.CharField(verbose_name=_('external link'), max_length=60,
blank=True, null=True)
# organizers = models.ManyToManyField(Person, verbose_name=_('organizers'),
# related_name='events', blank=True, null=True, through='EventOrganizer')
pictures_gallery = models.CharField(verbose_name=_('Pictures gallery'),
max_length=100, blank=True, null=True)
package = models.ForeignKey('shop.Package', blank=True, null=True)
rsvp_question = models.CharField(verbose_name=_('extra question'),
max_length=100, blank=True, null=True)
rsvp_begin = models.DateField(verbose_name=_('rsvp begin'),
blank=True, null=True)
rsvp_end = models.DateField(verbose_name=_('rsvp end'),
blank=True, null=True)
rsvp_multiple = models.BooleanField(default=True)
objects = NewsItemManager()
def __unicode__(self):
"""news item unicode method"""
return self.title
def get_absolute_url(self):
"""news item url"""
if self.date:
return reverse('event-details', args=[self.id])
else:
return reverse('news-details', args=[self.slug])
    def save(self, *args, **kwargs):
        """news item save method"""
        if self.pictures_gallery:
            if not self.pictures_gallery.startswith(('http://', 'https://')):
                self.pictures_gallery = 'http://' + self.pictures_gallery
        self.slug = defaultfilters.slugify(self.title)
        super(NewsItem, self).save(*args, **kwargs)
def rsvp_answer(self, person, yes=False, no=False, maybe=False):
"""define a rsvp answer to an event"""
rsvp = None
if RSVPAnswer.objects.filter(person=person, event=self).count() == 1:
rsvp = RSVPAnswer.objects.get(person=person, event=self)
rsvp.no = no
rsvp.yes = yes
rsvp.maybe = maybe
rsvp.updated_by = person
else:
rsvp = RSVPAnswer(person=person, event=self,
created_by=person, updated_by=person,
no=no, yes=yes, maybe=maybe, number=0)
if yes:
rsvp.number = 1
rsvp.save()
return rsvp
def attendees(self):
"""return event attendees"""
        return self.RSVPAnswers.filter(yes=True)
def attendeees_number(self):
"""Renvoie le nombre de participants à l'événement."""
nbpart = 0
for sub in self.RSVPAnswers.filter(yes=True):
nbpart += sub.number
return nbpart
class Meta:
"""news item meta information"""
ordering = ['-creation_date']
verbose_name = _('news item')
| ain7/www.ain7.org | ain7/news/models.py | Python | lgpl-2.1 | 6,657 |
# Load the siteconf module
from django.conf import settings
from django.utils.importlib import import_module
SITECONF_MODULE = getattr(settings, 'AUTOLOAD_SITECONF', settings.ROOT_URLCONF)
import_module(SITECONF_MODULE)
| luxnovalabs/enjigo_door | web_interface/autoload/models.py | Python | unlicense | 220 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import os
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within the sample YAML must be formatted
# alphabetically so that they compare equal, as strings, with the output of
# the YAML generation methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom: []
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents: {}
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_YAML_CONTENT_WITH_GADGETS = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom:
- customization_args:
adviceObjects:
value:
- adviceTitle: b
adviceHtml: <p>c</p>
gadget_type: TestGadget
gadget_name: ATestGadget
visible_in_states:
- New state
- Second state
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
Second state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: Second state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
TEST_GADGETS = {
'TestGadget': {
'dir': os.path.join(feconf.GADGETS_DIR, 'TestGadget')
}
}
TEST_GADGET_CUSTOMIZATION_ARGS = {
'adviceObjects': {
'value': [{
'adviceTitle': 'b',
'adviceHtml': '<p>c</p>'
}]
}
}
TEST_GADGET_DICT = {
'gadget_type': 'TestGadget',
'gadget_name': 'ATestGadget',
'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
'visible_in_states': ['First state']
}
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = exp_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = exp_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
# Test whether a default outcome to a non-existing state is invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
exploration.validate()
# Ensure an answer group with two fuzzy rules is invalid
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': ['Feedback'],
'param_changes': [],
},
'rule_specs': [{
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}, {
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}]
})
)
self._assert_validation_error(
exploration, 'AnswerGroups can only have one fuzzy rule.')
# Restore a valid exploration.
init_state.interaction.answer_groups.pop()
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': ['Feedback'],
'param_changes': [],
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}]
})
)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = 'Feedback'
self._assert_validation_error(
exploration, 'Expected outcome feedback to be a list')
outcome.feedback = [15]
self._assert_validation_error(
exploration, 'Expected outcome feedback item to be a string')
outcome.feedback = ['Feedback']
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = []
exploration.validate()
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
interaction.default_outcome = None
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
interaction.answer_groups = []
exploration.validate()
interaction.fallbacks = {}
self._assert_validation_error(
exploration, 'Expected fallbacks to be a list')
# Restore a valid exploration.
interaction.id = 'TextInput'
interaction.answer_groups = answer_groups
interaction.default_outcome = default_outcome
interaction.fallbacks = []
exploration.validate()
# Validate AnswerGroup.
answer_group.rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule for each answer group.')
exploration.states = {
exploration.init_state_name: exp_domain.State.create_default_state(
exploration.init_state_name)
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
def test_fallbacks_validation(self):
"""Test validation of state fallbacks."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('TextInput')
exploration.validate()
base_outcome = {
'dest': exploration.init_state_name,
'feedback': [],
'param_changes': [],
}
init_state.update_interaction_fallbacks([{
'trigger': {
'trigger_type': 'FakeTriggerName',
'customization_args': {
'num_submits': {
'value': 42,
},
},
},
'outcome': base_outcome,
}])
self._assert_validation_error(exploration, 'Unknown trigger type')
with self.assertRaises(KeyError):
init_state.update_interaction_fallbacks([{
'trigger': {
'trigger_type': 'NthResubmission',
'customization_args': {
'num_submits': {
'value': 42,
},
},
},
'outcome': {},
}])
init_state.update_interaction_fallbacks([{
'trigger': {
'trigger_type': 'NthResubmission',
'customization_args': {},
},
'outcome': base_outcome,
}])
# Default values for the customization args will be added silently.
exploration.validate()
self.assertEqual(len(init_state.interaction.fallbacks), 1)
self.assertEqual(
init_state.interaction.fallbacks[0].trigger.customization_args,
{
'num_submits': {
'value': 3,
}
})
init_state.update_interaction_fallbacks([{
'trigger': {
'trigger_type': 'NthResubmission',
'customization_args': {
'num_submits': {
'value': 42,
},
'bad_key_that_will_get_stripped_silently': {
'value': 'unused_value',
}
},
},
'outcome': base_outcome,
}])
# Unused customization arg keys will be stripped silently.
exploration.validate()
self.assertEqual(len(init_state.interaction.fallbacks), 1)
self.assertEqual(
init_state.interaction.fallbacks[0].trigger.customization_args,
{
'num_submits': {
'value': 42,
}
})
init_state.update_interaction_fallbacks([{
'trigger': {
'trigger_type': 'NthResubmission',
'customization_args': {
'num_submits': {
'value': 2,
},
},
},
'outcome': base_outcome,
}])
exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.interaction.default_outcome = None
exploration.validate()
exploration.tags = 'this should be a list'
self._assert_validation_error(
exploration, 'Expected \'tags\' to be a list')
exploration.tags = [123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['abc', 123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['']
self._assert_validation_error(exploration, 'Tags should be non-empty')
exploration.tags = ['123']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = ['ABC']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = [' a b']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b ']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b']
self._assert_validation_error(
exploration, 'Adjacent whitespace in tags should be collapsed')
exploration.tags = ['abc', 'abc']
self._assert_validation_error(
exploration, 'Some tags duplicate each other')
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
def test_exploration_skin_and_gadget_validation(self):
"""Test that Explorations including gadgets validate properly."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
invalid_gadget_instance = exp_domain.GadgetInstance(
'bad_type', 'aUniqueGadgetName', [], {})
with self.assertRaisesRegexp(
utils.ValidationError,
'Unknown gadget with type bad_type is not in the registry.'
):
invalid_gadget_instance.validate()
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
gadget_instance = exploration.skin_instance.panel_contents_dict[
'bottom'][0]
# Force a GadgetInstance to require certain state names.
gadget_instance.visible_in_states.extend(['DEF', 'GHI'])
self._assert_validation_error(
exploration, 'Exploration missing required states: DEF, GHI')
def_state = exp_domain.State.create_default_state('DEF')
def_state.update_interaction_id('TextInput')
exploration.states['DEF'] = def_state
self._assert_validation_error(
exploration, 'Exploration missing required state: GHI')
ghi_state = exp_domain.State.create_default_state('GHI')
ghi_state.update_interaction_id('TextInput')
exploration.states['GHI'] = ghi_state
exploration.validate()
# Force a gadget name collision.
gadget_instance.visible_in_states = ['DEF']
exploration.add_gadget(TEST_GADGET_DICT, 'bottom')
exploration.skin_instance.panel_contents_dict[
'bottom'][1].visible_in_states = ['GHI']
self._assert_validation_error(
exploration,
'ATestGadget gadget instance name must be unique.')
exploration.skin_instance.panel_contents_dict['bottom'].pop()
gadget_instance.visible_in_states.extend(['DEF'])
self._assert_validation_error(
exploration,
'TestGadget specifies visibility repeatedly for state: DEF')
# Remove duplicate state.
gadget_instance.visible_in_states.pop()
# Adding a panel that doesn't exist in the skin.
exploration.skin_instance.panel_contents_dict[
'non_existent_panel'] = []
self._assert_validation_error(
exploration,
'The panel name \'non_existent_panel\' is invalid.')
def test_gadget_name_validation(self):
"""Test that gadget naming conditions validate properly."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
gadget_instance = exploration.skin_instance.panel_contents_dict[
'bottom'][0]
gadget_instance.validate()
gadget_instance.name = ''
self._assert_validation_error(
gadget_instance, 'Gadget name must not be an empty string.')
gadget_instance.name = 0
self._assert_validation_error(
gadget_instance,
'Gadget name must be a string. Received type: int')
gadget_instance.name = 'ASuperLongGadgetNameThatExceedsTheLimit'
max_length = exp_domain.GadgetInstance._MAX_GADGET_NAME_LENGTH # pylint: disable=protected-access
self._assert_validation_error(
gadget_instance,
'ASuperLongGadgetNameThatExceedsTheLimit gadget name'
' exceeds maximum length of %d' % max_length)
gadget_instance.name = 'VERYGADGET!'
self._assert_validation_error(
gadget_instance,
'Gadget names must be alphanumeric. Spaces are allowed. '
'Received: VERYGADGET!')
gadget_instance.name = 'Name with \t tab'
self._assert_validation_error(
gadget_instance,
'Gadget names must be alphanumeric. Spaces are allowed. '
'Received: Name with \t tab')
gadget_instance.name = 'Name with \n newline'
self._assert_validation_error(
gadget_instance,
'Gadget names must be alphanumeric. Spaces are allowed. '
'Received: Name with \n newline')
gadget_instance.name = 'Name with 3 space'
self._assert_validation_error(
gadget_instance,
'Gadget names must be alphanumeric. Spaces are allowed. '
'Received: Name with 3 space')
gadget_instance.name = ' untrim whitespace '
self._assert_validation_error(
gadget_instance,
'Gadget names must be alphanumeric. Spaces are allowed. '
'Received: untrim whitespace ')
            # Names with spaces and numbers should pass.
gadget_instance.name = 'Space and 1'
gadget_instance.validate()
def test_exploration_get_gadget_types(self):
"""Test that Exploration.get_gadget_types returns apt results."""
exploration_without_gadgets = exp_domain.Exploration.from_yaml(
'An Exploration ID', SAMPLE_YAML_CONTENT)
self.assertEqual(exploration_without_gadgets.get_gadget_types(), [])
exploration_with_gadgets = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
self.assertEqual(
exploration_with_gadgets.get_gadget_types(), ['TestGadget'])
another_gadget = exp_domain.GadgetInstance(
'AnotherGadget', 'GadgetUniqueName1', [], {}
)
exploration_with_gadgets.skin_instance.panel_contents_dict[
'bottom'].append(another_gadget)
self.assertEqual(
exploration_with_gadgets.get_gadget_types(),
['AnotherGadget', 'TestGadget']
)
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration = exp_services.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
class StateExportUnitTests(test_utils.GenericTestBase):
"""Test export of states."""
def test_export_state_to_dict(self):
"""Test exporting a state to a dict."""
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exploration.add_states(['New state'])
state_dict = exploration.states['New state'].to_dict()
expected_dict = {
'content': [{
'type': 'text',
'value': u''
}],
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': 'New state',
'feedback': [],
'param_changes': [],
},
'fallbacks': [],
'id': None,
},
'param_changes': [],
}
self.assertEqual(expected_dict, state_dict)
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of explorations from YAML files."""
EXP_ID = 'An exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='Title', category='Category')
exploration.add_states(['New state'])
self.assertEqual(len(exploration.states), 2)
exploration.states['New state'].update_interaction_fallbacks([{
'trigger': {
'trigger_type': 'NthResubmission',
'customization_args': {
'num_submits': {
'value': 42,
},
},
},
'outcome': {
'dest': 'New state',
'feedback': [],
'param_changes': [],
},
}])
exploration.validate()
yaml_content = exploration.to_yaml()
self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
self.assertEqual(len(exploration2.states), 2)
yaml_content_2 = exploration2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Invalid\ninit_state_name:\nMore stuff')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'State1:\n(\nInvalid yaml')
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version >= 10, received: 9'
):
exp_domain.Exploration.from_yaml(
'exp4', SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version <= 9'
):
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', SAMPLE_YAML_CONTENT)
def test_yaml_import_and_export_without_gadgets(self):
"""Test from_yaml() and to_yaml() methods without gadgets."""
exploration_without_gadgets = exp_domain.Exploration.from_yaml(
self.EXP_ID, SAMPLE_YAML_CONTENT)
yaml_content = exploration_without_gadgets.to_yaml()
self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
def test_yaml_import_and_export_with_gadgets(self):
"""Test from_yaml() and to_yaml() methods including gadgets."""
exploration_with_gadgets = exp_domain.Exploration.from_yaml(
self.EXP_ID, SAMPLE_YAML_CONTENT_WITH_GADGETS)
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
generated_yaml = exploration_with_gadgets.to_yaml()
generated_yaml_as_dict = utils.dict_from_yaml(generated_yaml)
sample_yaml_as_dict = utils.dict_from_yaml(
SAMPLE_YAML_CONTENT_WITH_GADGETS)
self.assertEqual(generated_yaml_as_dict, sample_yaml_as_dict)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
"""Tests the presence of appropriate schema migration methods in the
Exploration domain object class.
"""
def test_correct_states_schema_conversion_methods_exist(self):
"""Test that the right states schema conversion methods exist."""
current_states_schema_version = (
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
for version_num in range(current_states_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
current_states_schema_version,
current_states_schema_version + 1)))
def test_correct_exploration_schema_conversion_methods_exist(self):
"""Test that the right exploration schema conversion methods exist."""
current_exp_schema_version = (
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION)
for version_num in range(1, current_exp_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
current_exp_schema_version, current_exp_schema_version + 1)))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V10
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v7(self):
"""Test direct loading from a v7 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
"""Test direct loading from a v8 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v10(self):
"""Test direct loading from a v10 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V10)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
"""Test conversion methods."""
def test_convert_exploration_to_player_dict(self):
exp_title = 'Title'
second_state_name = 'first state'
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title=exp_title, category='Category')
exploration.add_states([second_state_name])
def _get_default_state_dict(content_str, dest_name):
return {
'content': [{
'type': 'text',
'value': content_str,
}],
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': dest_name,
'feedback': [],
'param_changes': [],
},
'fallbacks': [],
'id': None,
},
'param_changes': [],
}
self.assertEqual(exploration.to_player_dict(), {
'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'title': exp_title,
'states': {
feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
feconf.DEFAULT_INIT_STATE_CONTENT_STR,
feconf.DEFAULT_INIT_STATE_NAME),
second_state_name: _get_default_state_dict(
'', second_state_name),
},
'param_changes': [],
'param_specs': {},
'skin_customizations': (
exp_domain.SkinInstance._get_default_skin_customizations() # pylint: disable=protected-access
),
'language_code': 'en',
})
class StateOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
def test_delete_state(self):
"""Test deletion of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.add_states(['first state'])
with self.assertRaisesRegexp(
ValueError, 'Cannot delete initial state'
):
exploration.delete_state(exploration.init_state_name)
exploration.add_states(['second state'])
exploration.delete_state('second state')
with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
exploration.delete_state('fake state')
def test_state_operations(self):
"""Test adding, updating and checking existence of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
self.assertNotIn('invalid_state_name', exploration.states)
self.assertEqual(len(exploration.states), 1)
default_state_name = exploration.init_state_name
exploration.rename_state(default_state_name, 'Renamed state')
self.assertEqual(len(exploration.states), 1)
self.assertEqual(exploration.init_state_name, 'Renamed state')
# Add a new state.
exploration.add_states(['State 2'])
self.assertEqual(len(exploration.states), 2)
# It is OK to rename a state to the same name.
exploration.rename_state('State 2', 'State 2')
# But it is not OK to add or rename a state using a name that already
# exists.
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.add_states(['State 2'])
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.rename_state('State 2', 'Renamed state')
        # And it is OK to rename a state to 'END' (old terminal pseudostate).
        # It is exercised throughout this test because a lot of old behavior
        # used to be specific to states named 'END'. These tests validate
        # that this is no longer the case.
exploration.rename_state('State 2', 'END')
# Should successfully be able to name it back.
exploration.rename_state('END', 'State 2')
# The exploration now has exactly two states.
self.assertNotIn(default_state_name, exploration.states)
self.assertIn('Renamed state', exploration.states)
self.assertIn('State 2', exploration.states)
# Can successfully add 'END' state
exploration.add_states(['END'])
# Should fail to rename like any other state
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.rename_state('State 2', 'END')
# Ensure the other states are connected to END
exploration.states[
'Renamed state'].interaction.default_outcome.dest = 'State 2'
exploration.states['State 2'].interaction.default_outcome.dest = 'END'
# Ensure the other states have interactions
exploration.states['Renamed state'].update_interaction_id('TextInput')
exploration.states['State 2'].update_interaction_id('TextInput')
# Other miscellaneous requirements for validation
exploration.title = 'Title'
exploration.category = 'Category'
exploration.objective = 'Objective'
# The exploration should NOT be terminable even though it has a state
# called 'END' and everything else is connected to it.
with self.assertRaises(Exception):
exploration.validate(strict=True)
# Renaming the node to something other than 'END' and giving it an
# EndExploration is enough to validate it, though it cannot have a
# default outcome or answer groups.
exploration.rename_state('END', 'AnotherEnd')
another_end_state = exploration.states['AnotherEnd']
another_end_state.update_interaction_id('EndExploration')
another_end_state.interaction.default_outcome = None
exploration.validate(strict=True)
# Name it back for final tests
exploration.rename_state('AnotherEnd', 'END')
# Should be able to successfully delete it
exploration.delete_state('END')
self.assertNotIn('END', exploration.states)
class GadgetOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on gadgets."""
def test_gadget_operations(self):
"""Test deletion of gadgets."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
exploration.add_gadget(TEST_GADGET_DICT, 'bottom')
self.assertEqual(exploration.skin_instance.panel_contents_dict[
'bottom'][0].type, TEST_GADGET_DICT['gadget_type'])
self.assertEqual(exploration.skin_instance.panel_contents_dict[
'bottom'][0].name, TEST_GADGET_DICT['gadget_name'])
with self.assertRaisesRegexp(
ValueError, 'Gadget NotARealGadget does not exist.'
):
exploration.rename_gadget('NotARealGadget', 'ANewName')
exploration.rename_gadget(
TEST_GADGET_DICT['gadget_name'], 'ANewName')
self.assertEqual(exploration.skin_instance.panel_contents_dict[
'bottom'][0].name, 'ANewName')
# Add another gadget.
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
exploration.add_gadget(TEST_GADGET_DICT, 'bottom')
self.assertEqual(
exploration.get_all_gadget_names(),
['ANewName', 'ATestGadget']
)
with self.assertRaisesRegexp(
ValueError, 'Duplicate gadget name: ANewName'
):
exploration.rename_gadget('ATestGadget', 'ANewName')
gadget_instance = exploration.get_gadget_instance_by_name(
'ANewName')
self.assertIs(
exploration.skin_instance.panel_contents_dict['bottom'][0],
gadget_instance
)
panel = exploration._get_panel_for_gadget('ANewName') # pylint: disable=protected-access
self.assertEqual(panel, 'bottom')
exploration.delete_gadget('ANewName')
exploration.delete_gadget('ATestGadget')
self.assertEqual(exploration.skin_instance.panel_contents_dict[
'bottom'], [])
with self.assertRaisesRegexp(
ValueError, 'Gadget ANewName does not exist.'
):
exploration.delete_gadget('ANewName')
class SkinInstanceUnitTests(test_utils.GenericTestBase):
"""Test methods for SkinInstance."""
_SAMPLE_SKIN_INSTANCE_DICT = {
'skin_id': 'conversation_v1',
'skin_customizations': {
'panels_contents': {
'bottom': [
{
'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
'gadget_type': 'TestGadget',
'gadget_name': 'ATestGadget',
'visible_in_states': ['New state', 'Second state']
}
]
}
}
}
def test_get_state_names_required_by_gadgets(self):
"""Test accurate computation of state_names_required_by_gadgets."""
skin_instance = exp_domain.SkinInstance(
'conversation_v1',
self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations'])
self.assertEqual(
skin_instance.get_state_names_required_by_gadgets(),
['New state', 'Second state'])
def test_generation_of_get_default_skin_customizations(self):
"""Tests that default skin customizations are created properly."""
skin_instance = exp_domain.SkinInstance(feconf.DEFAULT_SKIN_ID, None)
self.assertEqual(
skin_instance.panel_contents_dict,
{'bottom': []}
)
def test_conversion_of_skin_to_and_from_dict(self):
"""Tests conversion of SkinInstance to and from dict representations."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
skin_instance = exploration.skin_instance
skin_instance_as_dict = skin_instance.to_dict()
self.assertEqual(
skin_instance_as_dict,
self._SAMPLE_SKIN_INSTANCE_DICT)
skin_instance_as_instance = exp_domain.SkinInstance.from_dict(
skin_instance_as_dict)
self.assertEqual(skin_instance_as_instance.skin_id, 'conversation_v1')
self.assertEqual(
sorted(skin_instance_as_instance.panel_contents_dict.keys()),
['bottom'])
class GadgetInstanceUnitTests(test_utils.GenericTestBase):
"""Tests methods instantiating and validating GadgetInstances."""
def test_gadget_instantiation(self):
"""Test instantiation of GadgetInstances."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
self.assertEqual(len(exploration.skin_instance.panel_contents_dict[
'bottom']), 1)
def test_gadget_instance_properties(self):
"""Test accurate representation of gadget properties."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
panel_contents_dict = exploration.skin_instance.panel_contents_dict
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
test_gadget_instance = panel_contents_dict['bottom'][0]
self.assertEqual(test_gadget_instance.height, 50)
self.assertEqual(test_gadget_instance.width, 60)
self.assertIn('New state', test_gadget_instance.visible_in_states)
def test_gadget_instance_validation(self):
"""Test validation of GadgetInstance."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
panel_contents_dict = exploration.skin_instance.panel_contents_dict
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
test_gadget_instance = panel_contents_dict['bottom'][0]
# Validation against sample YAML should pass without error.
exploration.validate()
# Assert size exceeded error triggers when a gadget's size exceeds
# a panel's capacity.
with self.swap(
test_gadget_instance.gadget,
'width_px',
4600):
self._assert_validation_error(
exploration,
'Width 4600 of panel \'bottom\' exceeds limit of 350')
# Assert internal validation against CustomizationArgSpecs.
test_gadget_instance.customization_args[
'adviceObjects']['value'].extend(
[
{'adviceTitle': 'test_title', 'adviceHtml': 'test html'},
{'adviceTitle': 'another_title', 'adviceHtml': 'more html'},
{'adviceTitle': 'third_title', 'adviceHtml': 'third html'}
]
)
with self.assertRaisesRegexp(
utils.ValidationError,
'TestGadget is limited to 3 tips, found 4.'
):
test_gadget_instance.validate()
test_gadget_instance.customization_args[
'adviceObjects']['value'].pop()
# Assert that too many gadgets in a panel raise a ValidationError.
panel_contents_dict['bottom'].append(test_gadget_instance)
with self.assertRaisesRegexp(
utils.ValidationError,
'\'bottom\' panel expected at most 1 gadget, but 2 gadgets are '
'visible in state \'New state\'.'
):
exploration.validate()
# Assert that an error is raised when a gadget is not visible in any
# states.
test_gadget_instance.visible_in_states = []
with self.assertRaisesRegexp(
utils.ValidationError,
'TestGadget gadget not visible in any states.'):
test_gadget_instance.validate()
def test_conversion_of_gadget_instance_to_and_from_dict(self):
"""Test conversion of GadgetInstance to and from dict. """
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
panel_contents_dict = exploration.skin_instance.panel_contents_dict
test_gadget_instance = panel_contents_dict['bottom'][0]
test_gadget_as_dict = test_gadget_instance.to_dict()
self.assertEqual(
test_gadget_as_dict,
{
'gadget_type': 'TestGadget',
'gadget_name': 'ATestGadget',
'visible_in_states': ['New state', 'Second state'],
'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS
}
)
test_gadget_as_instance = exp_domain.GadgetInstance.from_dict(
test_gadget_as_dict)
self.assertEqual(test_gadget_as_instance.width, 60)
self.assertEqual(test_gadget_as_instance.height, 50)
class GadgetVisibilityInStatesUnitTests(test_utils.GenericTestBase):
"""Tests methods affecting gadget visibility in states."""
def test_retrieving_affected_gadgets(self):
"""Test that appropriate gadgets are retrieved."""
exploration = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
affected_gadget_instances = (
exploration._get_gadget_instances_visible_in_state('Second state')) # pylint: disable=protected-access
self.assertEqual(len(affected_gadget_instances), 1)
self.assertEqual(affected_gadget_instances[0].name, 'ATestGadget')
| mit0110/oppia | core/domain/exp_domain_test.py | Python | apache-2.0 | 69,795 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
import tensorflow as tf
def StopInN(coord, n_secs):
time.sleep(n_secs)
coord.request_stop()
def RaiseInN(coord, n_secs, ex, report_exception):
try:
time.sleep(n_secs)
raise ex
  except Exception as e:  # pylint: disable=broad-except
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
def RaiseInNUsingContextHandler(coord, n_secs, ex):
with coord.stop_on_exception():
time.sleep(n_secs)
raise ex
def SleepABit(n_secs):
time.sleep(n_secs)
class CoordinatorTest(tf.test.TestCase):
def testStopAPI(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
threading.Thread(target=StopInN, args=(coord, 0.02)).start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
self.assertTrue(coord.wait_for_stop(0.03))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))]
for t in threads:
t.start()
coord.join(threads)
def testJoinGraceExpires(self):
def TestWithGracePeriod(stop_grace_period):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=StopInN, args=(coord, 0.01)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=stop_grace_period)
TestWithGracePeriod(1e-10)
TestWithGracePeriod(0.002)
TestWithGracePeriod(1.0)
def testJoinRaiseReportExcInfo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), False)),
threading.Thread(target=RaiseInN,
args=(coord, 0.02, RuntimeError("Too late"), False))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
threading.Thread(target=RaiseInN,
args=(coord, 0.02, RuntimeError("Too late"), True))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinIgnoresOutOfRange(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01,
tf.errors.OutOfRangeError(None, None, "First"),
True))
]
for t in threads:
t.start()
coord.join(threads)
def testJoinIgnoresMyExceptionType(self):
coord = tf.train.Coordinator(clean_stop_exception_types=(ValueError,))
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, ValueError("Clean stop"), True))
]
for t in threads:
t.start()
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.01, RuntimeError("First"))),
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.02, RuntimeError("Too late")))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testClearStopClearsExceptionToo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
coord.clear_stop()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("Second"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "Second"):
coord.join(threads)
def _StopAt0(coord, n):
if n[0] == 0:
coord.request_stop()
else:
n[0] -= 1
class LooperTest(tf.test.TestCase):
def testTargetArgs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord, n))
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetKwargs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
kwargs={"coord": coord, "n": n})
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetMixedArgs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord,), kwargs={"n": n})
coord.join([thread])
self.assertEqual(0, n[0])
if __name__ == "__main__":
tf.test.main()
| HaebinShin/tensorflow | tensorflow/python/training/coordinator_test.py | Python | apache-2.0 | 6,639 |
import os
from ._base import DanubeCloudCommand, CommandError
class Command(DanubeCloudCommand):
help = 'Check the existence of SECRET_KEY in local_settings.py and generate one if needed.'
def handle(self, *args, **options):
try:
from core import local_settings
except ImportError:
local_settings = None
fn = self._path(self.PROJECT_DIR, 'core', 'local_settings.py')
else:
fn = local_settings.__file__.replace('local_settings.pyc', 'local_settings.py')
try:
# noinspection PyUnresolvedReferences
key = local_settings.SECRET_KEY
except AttributeError:
self.display('Missing SECRET_KEY in local_settings.py', color='yellow')
key = os.urandom(128).encode('base64')[:76]
with open(fn, 'a') as f:
f.write('\nSECRET_KEY="""' + key + '"""\n')
self.display('New SECRET_KEY was saved in %s' % fn, color='green')
if key:
self.display('SECRET_KEY is OK', color='green')
else:
raise CommandError('SECRET_KEY is empty!')
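
# Minimal usage sketch, assuming standard Django management-command discovery
# (the command name comes from this module's filename):
#
#     ./manage.py secret_key
#
# If SECRET_KEY is missing, this appends a generated key to core/local_settings.py.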
| erigones/esdc-ce | core/management/commands/secret_key.py | Python | apache-2.0 | 1,138 |
# Copyright 2017-18 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from datetime import date
from odoo.tests.common import TransactionCase
class TestStockRemovalLocationByPriority(TransactionCase):
def setUp(self):
super(TestStockRemovalLocationByPriority, self).setUp()
self.res_users_model = self.env['res.users']
self.stock_location_model = self.env['stock.location']
self.stock_warehouse_model = self.env['stock.warehouse']
self.stock_picking_model = self.env['stock.picking']
self.stock_change_model = self.env['stock.change.product.qty']
self.product_model = self.env['product.product']
self.quant_model = self.env['stock.quant']
self.picking_internal = self.env.ref('stock.picking_type_internal')
self.picking_out = self.env.ref('stock.picking_type_out')
self.location_supplier = self.env.ref('stock.stock_location_suppliers')
self.company = self.env.ref('base.main_company')
grp_rem_priority = self.env.ref(
'stock_removal_location_by_priority.group_removal_priority')
# We assign the group to admin, as the _get_removal_strategy_order
# method is going to be always executed as sudo.
user_admin = self.env.ref('base.user_root')
user_admin.groups_id = [(4, grp_rem_priority.id, 0)]
self.wh1 = self.stock_warehouse_model.create({
'name': 'WH1',
'code': 'WH1',
})
# Removal strategies:
self.fifo = self.env.ref('stock.removal_fifo')
self.lifo = self.env.ref('stock.removal_lifo')
# Create locations:
self.stock = self.stock_location_model.create({
'name': 'Stock Base',
'usage': 'internal',
})
self.shelf_A = self.stock_location_model.create({
'name': 'Shelf_A',
'usage': 'internal',
'location_id': self.stock.id,
'removal_priority': 10,
})
self.shelf_B = self.stock_location_model.create({
'name': 'Shelf_B',
'usage': 'internal',
'location_id': self.stock.id,
'removal_priority': 5,
})
self.stock_2 = self.stock_location_model.create({
'name': 'Another Stock Location',
'usage': 'internal',
})
# Create a product:
self.product_1 = self.product_model.create({
'name': 'Test Product 1',
'type': 'product',
})
# Create quants:
today = date.today()
q1 = self.quant_model.create({
'product_id': self.product_1.id,
'location_id': self.shelf_A.id,
'quantity': 5.0,
'in_date': today,
})
q2 = self.quant_model.create({
'product_id': self.product_1.id,
'location_id': self.shelf_B.id,
'quantity': 5.0,
'in_date': today,
})
self.quants = q1 + q2
def _create_picking(self, picking_type, location, location_dest, qty):
picking = self.stock_picking_model.create({
'picking_type_id': picking_type.id,
'location_id': location.id,
'location_dest_id': location_dest.id,
'move_lines': [
(0, 0, {
'name': 'Test move',
'product_id': self.product_1.id,
'product_uom': self.product_1.uom_id.id,
'product_uom_qty': qty,
'location_id': location.id,
'location_dest_id': location_dest.id,
'price_unit': 2,
})]
})
return picking
def test_01_stock_removal_location_by_priority_fifo(self):
"""Tests removal priority with FIFO strategy."""
self.stock.removal_strategy_id = self.fifo
# quants must start unreserved
for q in self.quants:
self.assertEqual(q.reserved_quantity, 0.0)
if q.location_id == self.shelf_A:
self.assertEqual(q.removal_priority, 10)
if q.location_id == self.shelf_B:
self.assertEqual(q.removal_priority, 5)
self.assertEqual(self.quants[0].in_date, self.quants[1].in_date)
picking_1 = self._create_picking(
self.picking_internal, self.stock, self.stock_2, 5)
picking_1.action_confirm()
picking_1.action_assign()
# quants must be reserved in Shelf B (lower removal_priority value).
for q in self.quants:
if q.location_id == self.shelf_A:
self.assertEqual(q.reserved_quantity, 0.0)
if q.location_id == self.shelf_B:
self.assertEqual(q.reserved_quantity, 5.0)
def test_02_stock_removal_location_by_priority_lifo(self):
"""Tests removal priority with LIFO strategy."""
self.stock.removal_strategy_id = self.lifo
# quants must start unreserved
for q in self.quants:
self.assertEqual(q.reserved_quantity, 0.0)
if q.location_id == self.shelf_A:
self.assertEqual(q.removal_priority, 10)
if q.location_id == self.shelf_B:
self.assertEqual(q.removal_priority, 5)
self.assertEqual(self.quants[0].in_date, self.quants[1].in_date)
picking_1 = self._create_picking(
self.picking_internal, self.stock, self.stock_2, 5)
picking_1.action_confirm()
picking_1.action_assign()
# quants must be reserved in Shelf B (lower removal_priority value).
for q in self.quants:
if q.location_id == self.shelf_A:
self.assertEqual(q.reserved_quantity, 0.0)
if q.location_id == self.shelf_B:
self.assertEqual(q.reserved_quantity, 5.0)
| Vauxoo/stock-logistics-warehouse | stock_removal_location_by_priority/tests/test_stock_removal_location_by_priority.py | Python | agpl-3.0 | 5,936 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
# Used in the tests for run_python_module
import sys
print("runmod2: passed %s" % sys.argv[1])
| blueyed/coveragepy | tests/modules/pkg1/runmod2.py | Python | apache-2.0 | 251 |
# coding: utf-8
testinfo = "s, t 0.77, s, q"
tags = "animation"
from nose.tools import nottest
import summa
from summa.director import director
from summa.sprite import Sprite
import pyglet
from customstuff import TimedScene
import os
pyglet.resource.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__))))
pyglet.resource.reindex()
class TestLayer(summa.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.sprite = Sprite(pyglet.resource.animation('dinosaur.gif'))
self.sprite.position = x/2, y/2
self.add(self.sprite)
# this is causing segmentation faults in the test
@nottest
def test_animation():
director.init()
test_layer = TestLayer()
main_scene = TimedScene(test_layer)
director.run(main_scene)
| shackra/thomas-aquinas | tests/test_animation.py | Python | bsd-3-clause | 852 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-01 08:13
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('avatar', models.CharField(max_length=150)),
('url', models.CharField(max_length=50)),
('about', models.TextField(blank=True, max_length=500)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| bugulin/gymgeek-web | accounts/migrations/0001_initial.py | Python | apache-2.0 | 3,127 |
class ValidationError(Exception):
def __init__(self, data):
self.data = data
class BadConfiguration(Exception):
pass
class BadValidation(Exception):
pass
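

if __name__ == '__main__':
    # Minimal usage sketch: ValidationError carries a payload that callers
    # can read back from its ``data`` attribute; the message below is an
    # arbitrary example value.
    try:
        raise ValidationError('This field is required.')
    except ValidationError as error:
        print(error.data)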
| hugollm/lie2me | lie2me/exceptions.py | Python | mit | 179 |
###############################################################################
# Name: osutil.py #
# Purpose: Operating System Utilities.                                        #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2010 Cody Precord <[email protected]> #
# Licence: wxWindows Licence #
###############################################################################
"""
Editra Business Model Library: Operating System Utilities
Utilities for handling OS related interactions.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: $"
__revision__ = "$Revision: $"
__all__ = ['InstallTermHandler',
'GetWindowsDrives', 'GetWindowsDriveType',
'GenericDrive', 'FixedDrive', 'CDROMDrive', 'RamDiskDrive', 'RemoteDrive',
'RemovableDrive' ]
#-----------------------------------------------------------------------------#
# Imports
import wx
import ctypes
import signal
import collections
HASWIN32 = False
if wx.Platform == '__WXMSW__':
try:
import win32api
except ImportError:
HASWIN32 = False
else:
HASWIN32 = True
#-----------------------------------------------------------------------------#
# Windows Drive Utilities
class GenericDrive(object):
def __init__(self, name):
super(GenericDrive, self).__init__()
# Attributes
self._name = name
Name = property(lambda self: self._name,
lambda self, v: setattr(self, '_name', v))
class RemovableDrive(GenericDrive):
pass
class FixedDrive(GenericDrive):
pass
class RemoteDrive(GenericDrive):
pass
class CDROMDrive(GenericDrive):
pass
class RamDiskDrive(GenericDrive):
pass
def GetWindowsDrives():
"""Get a list of all available windows drives
    @return: list of drive objects (GenericDrive subclasses)
"""
assert wx.Platform == '__WXMSW__', "Windows Only API Method"
drives = list()
try:
dletters = list()
bmask = ctypes.windll.kernel32.GetLogicalDrives()
for dletter in u"ABCDEFGHIJKLMNOPQRSTUVWXYZ":
if bmask & 1:
dletters.append(dletter)
bmask >>= 1
for dletter in dletters:
dname = dletter + u":\\"
dtype = GetWindowsDriveType(dname)
if type(dtype) != GenericDrive:
drives.append(dtype)
except Exception, err:
pass
return drives
def GetWindowsDriveType(dname):
"""Get the drive type for the given letter"""
assert wx.Platform == '__WXMSW__', "Windows Only API Method"
dtype = GenericDrive(dname)
try:
dtypes = [None, None, RemovableDrive, FixedDrive, RemoteDrive, CDROMDrive, RamDiskDrive]
idx = ctypes.windll.kernel32.GetDriveTypeW(dname)
if idx < len(dtypes):
drive = dtypes[idx]
if drive:
dtype = drive(dname)
except:
pass
return dtype
#-----------------------------------------------------------------------------#
def InstallTermHandler(callback, *args, **kwargs):
"""Install exit app handler for sigterm (unix/linux)
and uses SetConsoleCtrlHandler on Windows.
@param callback: callable(*args, **kwargs)
@param args: positional arguments to pass to callback
@param kwargs: keyword arguments to pass to callback
@return: bool (installed or not)
"""
assert isinstance(callback, collections.Callable), "callback must be callable!"
installed = True
if wx.Platform == '__WXMSW__':
if HASWIN32:
win32api.SetConsoleCtrlHandler(lambda dummy : callback(*args, **kwargs),
True)
else:
installed = False
else:
signal.signal(signal.SIGTERM,
lambda signum, frame : callback(*args, **kwargs))
return installed
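
#-----------------------------------------------------------------------------#

if __name__ == '__main__':
    # Minimal usage sketch; the callback and its message argument are
    # arbitrary examples, not part of the module's API.
    def _OnTerm(msg):
        print msg

    print "Handler installed: %s" % InstallTermHandler(_OnTerm, "shutting down")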
| ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/ebmlib/osutil.py | Python | mit | 4,134 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('companies', '0003_auto_20170814_0301'),
]
operations = [
migrations.AlterField(
model_name='company',
name='about_markup_type',
field=models.CharField(choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'), ('markdown', 'Markdown'), ('restructuredtext', 'Restructured Text')], default='restructuredtext', max_length=30),
),
]
| proevo/pythondotorg | companies/migrations/0004_auto_20170821_2000.py | Python | apache-2.0 | 573 |
#-*- coding: utf-8 -*-
from django.core import urlresolvers
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.fields.multistorage_file import MultiStorageFileField
from filer.models import mixins
from filer import settings as filer_settings
from filer.models.foldermodels import Folder
from polymorphic import PolymorphicModel, PolymorphicManager
import hashlib
import os
class FileManager(PolymorphicManager):
def find_all_duplicates(self):
r = {}
for file_obj in self.all():
if file_obj.sha1:
q = self.filter(sha1=file_obj.sha1)
if len(q) > 1:
r[file_obj.sha1] = q
return r
def find_duplicates(self, file_obj):
return [i for i in self.exclude(pk=file_obj.pk).filter(sha1=file_obj.sha1)]
class File(PolymorphicModel, mixins.IconsMixin):
file_type = 'File'
_icon = "file"
folder = models.ForeignKey(Folder, verbose_name=_('folder'), related_name='all_files',
null=True, blank=True)
file = MultiStorageFileField(_('file'), null=True, blank=True, max_length=255)
_file_size = models.IntegerField(_('file size'), null=True, blank=True)
sha1 = models.CharField(_('sha1'), max_length=40, blank=True, default='')
has_all_mandatory_data = models.BooleanField(_('has all mandatory data'), default=False, editable=False)
original_filename = models.CharField(_('original filename'), max_length=255, blank=True, null=True)
name = models.CharField(max_length=255, default="", blank=True,
verbose_name=_('name'))
description = models.TextField(null=True, blank=True,
verbose_name=_('description'))
owner = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
related_name='owned_%(class)ss',
null=True, blank=True, verbose_name=_('owner'))
uploaded_at = models.DateTimeField(_('uploaded at'), auto_now_add=True)
modified_at = models.DateTimeField(_('modified at'), auto_now=True)
is_public = models.BooleanField(
default=filer_settings.FILER_IS_PUBLIC_DEFAULT,
verbose_name=_('Permissions disabled'),
help_text=_('Disable any permission checking for this ' +\
'file. File will be publicly accessible ' +\
'to anyone.'))
objects = FileManager()
@classmethod
def matches_file_type(cls, iname, ifile, request):
return True # I match all files...
def __init__(self, *args, **kwargs):
super(File, self).__init__(*args, **kwargs)
self._old_is_public = self.is_public
def _move_file(self):
"""
Move the file from src to dst.
"""
src_file_name = self.file.name
dst_file_name = self._meta.get_field('file').generate_filename(
self, self.original_filename)
if self.is_public:
src_storage = self.file.storages['private']
dst_storage = self.file.storages['public']
else:
src_storage = self.file.storages['public']
dst_storage = self.file.storages['private']
# delete the thumbnail
# We are toggling the is_public to make sure that easy_thumbnails can
# delete the thumbnails
self.is_public = not self.is_public
self.file.delete_thumbnails()
self.is_public = not self.is_public
        # This is needed because most of the remote file storage backends do
        # not open the file.
src_file = src_storage.open(src_file_name)
src_file.open()
self.file = dst_storage.save(dst_file_name,
ContentFile(src_file.read()))
src_storage.delete(src_file_name)
def _copy_file(self, destination, overwrite=False):
"""
        Copies the file to the given destination and returns the name it was
        saved under.
"""
if overwrite:
# If the destination file already exists default storage backend
# does not overwrite it but generates another filename.
# TODO: Find a way to override this behavior.
raise NotImplementedError
src_file_name = self.file.name
storage = self.file.storages['public' if self.is_public else 'private']
        # This is needed because most of the remote file storage backends do
        # not open the file.
src_file = storage.open(src_file_name)
src_file.open()
return storage.save(destination, ContentFile(src_file.read()))
def generate_sha1(self):
sha = hashlib.sha1()
self.file.seek(0)
sha.update(self.file.read())
self.sha1 = sha.hexdigest()
# to make sure later operations can read the whole file
self.file.seek(0)
def save(self, *args, **kwargs):
# check if this is a subclass of "File" or not and set
# _file_type_plugin_name
if self.__class__ == File:
# what should we do now?
# maybe this has a subclass, but is being saved as a File instance
# anyway. do we need to go check all possible subclasses?
pass
elif issubclass(self.__class__, File):
self._file_type_plugin_name = self.__class__.__name__
# cache the file size
# TODO: only do this if needed (depending on the storage backend the whole file will be downloaded)
try:
self._file_size = self.file.size
except:
pass
if self._old_is_public != self.is_public and self.pk:
self._move_file()
self._old_is_public = self.is_public
# generate SHA1 hash
# TODO: only do this if needed (depending on the storage backend the whole file will be downloaded)
try:
self.generate_sha1()
except Exception as e:
pass
super(File, self).save(*args, **kwargs)
save.alters_data = True
def delete(self, *args, **kwargs):
# Delete the model before the file
super(File, self).delete(*args, **kwargs)
# Delete the file if there are no other Files referencing it.
if not File.objects.filter(file=self.file.name, is_public=self.is_public).exists():
self.file.delete(False)
delete.alters_data = True
@property
def label(self):
if self.name in ['', None]:
text = self.original_filename or 'unnamed file'
else:
text = self.name
text = "%s" % (text,)
return text
def __lt__(self, other):
def cmp(a, b):
return (a > b) - (a < b)
return cmp(self.label.lower(), other.label.lower()) < 0
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
        Return True if the current user has the given permission on this
        file.
"""
user = request.user
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, permission_type)
else:
return False
def __unicode__(self):
if self.name in ('', None):
text = "%s" % (self.original_filename,)
else:
text = "%s" % (self.name,)
return text
def get_admin_url_path(self):
return urlresolvers.reverse(
'admin:%s_%s_change' % (self._meta.app_label,
self._meta.module_name,),
args=(self.pk,)
)
@property
def file_ptr(self):
"""
Evil hack to get around the cascade delete problem with django_polymorphic.
Prevents ``AttributeError: 'File' object has no attribute 'file_ptr'``.
This is only a workaround for one level of subclassing. The hierarchy of
object in the admin delete view is wrong, but at least it works.
"""
return self
@property
def url(self):
"""
to make the model behave like a file field
"""
try:
r = self.file.url
except:
r = ''
return r
@property
def path(self):
try:
return self.file.path
except:
return ""
@property
def size(self):
return self._file_size or 0
@property
def extension(self):
filetype = os.path.splitext(self.file.name)[1].lower()
if len(filetype) > 0:
filetype = filetype[1:]
return filetype
@property
def logical_folder(self):
"""
if this file is not in a specific folder return the Special "unfiled"
Folder object
"""
if not self.folder:
from filer.models.virtualitems import UnfiledImages
return UnfiledImages()
else:
return self.folder
@property
def logical_path(self):
"""
Gets logical path of the folder in the tree structure.
Used to generate breadcrumbs
"""
folder_path = []
if self.folder:
folder_path.extend(self.folder.get_ancestors())
folder_path.append(self.logical_folder)
return folder_path
@property
def duplicates(self):
return File.objects.find_duplicates(self)
class Meta:
app_label = 'filer'
verbose_name = _('file')
verbose_name_plural = _('files')
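
# A minimal sketch of the duplicate-detection helpers above; the queryset used
# to pick a file is an arbitrary example.
#
#     some_file = File.objects.filter(is_public=True).first()
#     File.objects.find_duplicates(some_file)    # other files sharing its sha1
#     File.objects.find_all_duplicates()         # {sha1: files} for every clash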
| maykinmedia/django-filer | filer/models/filemodels.py | Python | bsd-3-clause | 9,937 |
# -*- coding: utf-8 -*-
"""
MoinMoin - bootstrap theme, based on codereading5
@copyright: 2012 speirs http://www.codereading.com
@copyright: 2013 Luca Barbato
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.theme import ThemeBase
from MoinMoin.action import get_available_actions
from MoinMoin import wikiutil, config, version, caching
from MoinMoin.Page import Page
class Theme(ThemeBase):
name = "bootstrap"
_ = lambda x: x # We don't have gettext at this moment, so we fake it
icons = {
# key alt icon filename w h
# FileAttach
'attach': ("%(attach_count)s", "moin-attach.png", 16, 16),
'info': ("[INFO]", "moin-info.png", 16, 16),
'attachimg': (_("[ATTACH]"), "attach.png", 32, 32),
# RecentChanges
'rss': (_("[RSS]"), "moin-rss.png", 16, 16),
'deleted': (_("[DELETED]"), "moin-deleted.png", 16, 16),
'updated': (_("[UPDATED]"), "moin-updated.png", 16, 16),
'renamed': (_("[RENAMED]"), "moin-renamed.png", 16, 16),
'conflict': (_("[CONFLICT]"), "moin-conflict.png", 16, 16),
'new': (_("[NEW]"), "moin-new.png", 16, 16),
'diffrc': (_("[DIFF]"), "moin-diff.png", 16, 16),
# General
'bottom': (_("[BOTTOM]"), "moin-bottom.png", 16, 16),
'top': (_("[TOP]"), "moin-top.png", 16, 16),
'www': ("[WWW]", "moin-www.png", 16, 16),
'mailto': ("[MAILTO]", "moin-email.png", 16, 16),
'news': ("[NEWS]", "moin-news.png", 16, 16),
'telnet': ("[TELNET]", "moin-telnet.png", 16, 16),
'ftp': ("[FTP]", "moin-ftp.png", 16, 16),
'file': ("[FILE]", "moin-ftp.png", 16, 16),
# search forms
'searchbutton': ("[?]", "moin-search.png", 16, 16),
'interwiki': ("[%(wikitag)s]", "moin-inter.png", 16, 16),
}
del _
stylesheets = (
# media basename
('all', 'ui'),
('all', 'bootstrap'),
('all', 'pygments'),
('all', 'bs'),
('all', 'libav'),
('all', 'alert'),
)
def send_title(self, text, **keywords):
""" Override
Output the page header (and title).
@param text: the title text
@keyword page: the page instance that called us - using this is more efficient than using pagename..
@keyword pagename: 'PageName'
@keyword print_mode: 1 (or 0)
@keyword editor_mode: 1 (or 0)
@keyword media: css media type, defaults to 'screen'
@keyword allow_doubleclick: 1 (or 0)
@keyword html_head: additional <head> code
@keyword body_attr: additional <body> attributes
@keyword body_onload: additional "onload" JavaScript code
"""
request = self.request
_ = request.getText
rev = request.rev
if keywords.has_key('page'):
page = keywords['page']
pagename = page.page_name
else:
pagename = keywords.get('pagename', '')
page = Page(request, pagename)
if keywords.get('msg', ''):
raise DeprecationWarning("Using send_page(msg=) is deprecated! Use theme.add_msg() instead!")
scriptname = request.script_root
# get name of system pages
page_front_page = wikiutil.getFrontPage(request).page_name
page_help_contents = wikiutil.getLocalizedPage(request, 'HelpContents').page_name
page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
page_site_navigation = wikiutil.getLocalizedPage(request, 'SiteNavigation').page_name
page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name
page_help_formatting = wikiutil.getLocalizedPage(request, 'HelpOnFormatting').page_name
page_find_page = wikiutil.getLocalizedPage(request, 'FindPage').page_name
home_page = wikiutil.getInterwikiHomePage(request) # sorry theme API change!!! Either None or tuple (wikiname,pagename) now.
page_parent_page = getattr(page.getParentPage(), 'page_name', None)
# set content_type, including charset, so web server doesn't touch it:
request.content_type = "text/html; charset=%s" % (config.charset, )
# Prepare the HTML <head> element
user_head = [request.cfg.html_head]
# include charset information - needed for moin_dump or any other case
# when reading the html without a web server
user_head.append('''<meta charset="%s">\n''' % (page.output_charset))
meta_keywords = request.getPragma('keywords')
meta_desc = request.getPragma('description')
if meta_keywords:
user_head.append('<meta name="keywords" content="%s">\n' % wikiutil.escape(meta_keywords, 1))
if meta_desc:
user_head.append('<meta name="description" content="%s">\n' % wikiutil.escape(meta_desc, 1))
# add meta statement if user has doubleclick on edit turned on or it is default
if (pagename and keywords.get('allow_doubleclick', 0) and
not keywords.get('print_mode', 0) and
request.user.edit_on_doubleclick):
if request.user.may.write(pagename): # separating this gains speed
user_head.append('<meta name="edit_on_doubleclick" content="%s">\n' % (request.script_root or '/'))
# search engine precautions / optimization:
# if it is an action or edit/search, send query headers (noindex,nofollow):
if request.query_string:
user_head.append(request.cfg.html_head_queries)
elif request.method == 'POST':
user_head.append(request.cfg.html_head_posts)
# we don't want to have BadContent stuff indexed:
elif pagename in ['BadContent', 'LocalBadContent', ]:
user_head.append(request.cfg.html_head_posts)
# if it is a special page, index it and follow the links - we do it
# for the original, English pages as well as for (the possibly
# modified) frontpage:
elif pagename in [page_front_page, request.cfg.page_front_page,
page_title_index, 'TitleIndex',
page_find_page, 'FindPage',
page_site_navigation, 'SiteNavigation',
'RecentChanges', ]:
user_head.append(request.cfg.html_head_index)
# if it is a normal page, index it, but do not follow the links, because
# there are a lot of illegal links (like actions) or duplicates:
else:
user_head.append(request.cfg.html_head_normal)
if 'pi_refresh' in keywords and keywords['pi_refresh']:
user_head.append('<meta http-equiv="refresh" content="%d;URL=%s">' % keywords['pi_refresh'])
# output buffering increases latency but increases throughput as well
output = []
output.append("""\
<!DOCTYPE html>
<html lang="%s">
<head>
%s
<meta name="viewport" content="width=device-width, initial-scale=1.0">
%s
%s
""" % (
self.cfg.language_default,
''.join(user_head),
self.html_head({
'page': page,
'title': text,
'sitename': request.cfg.html_pagetitle or request.cfg.sitename,
'print_mode': keywords.get('print_mode', False),
'media': keywords.get('media', 'screen'),
}),
keywords.get('html_head', ''),
))
output.append("</head>\n")
request.write(''.join(output))
output = []
# start the <body>
bodyattr = []
if keywords.has_key('body_attr'):
bodyattr.append(' ')
bodyattr.append(keywords['body_attr'])
# Set body to the user interface language and direction
bodyattr.append(' %s' % self.ui_lang_attr())
body_onload = keywords.get('body_onload', '')
if body_onload:
bodyattr.append(''' onload="%s"''' % body_onload)
output.append('\n<body%s>\n' % ''.join(bodyattr))
# Output -----------------------------------------------------------
# If in print mode, start page div and emit the title
if keywords.get('print_mode', 0):
d = {
'title_text': text,
'page': page,
'page_name': pagename or '',
'rev': rev,
}
request.themedict = d
output.append(self.startPage())
output.append(self.interwiki(d))
output.append(self.title(d))
# In standard mode, emit theme.header
else:
exists = pagename and page.exists(includeDeleted=True)
# prepare dict for theme code:
d = {
'theme': self.name,
'script_name': scriptname,
'title_text': text,
'logo_string': request.cfg.logo_string,
'site_name': request.cfg.sitename,
'page': page,
'rev': rev,
'pagesize': pagename and page.size() or 0,
# exists checked to avoid creation of empty edit-log for non-existing pages
'last_edit_info': exists and page.lastEditInfo() or '',
'page_name': pagename or '',
'page_find_page': page_find_page,
'page_front_page': page_front_page,
'home_page': home_page,
'page_help_contents': page_help_contents,
'page_help_formatting': page_help_formatting,
'page_parent_page': page_parent_page,
'page_title_index': page_title_index,
'page_word_index': page_word_index,
'user_name': request.user.name,
'user_valid': request.user.valid,
'msg': self._status,
'trail': keywords.get('trail', None),
# Discontinued keys, keep for a while for 3rd party theme developers
'titlesearch': 'use self.searchform(d)',
'textsearch': 'use self.searchform(d)',
'navibar': ['use self.navibar(d)'],
'available_actions': ['use self.request.availableActions(page)'],
}
# add quoted versions of pagenames
newdict = {}
for key in d:
if key.startswith('page_'):
if not d[key] is None:
newdict['q_'+key] = wikiutil.quoteWikinameURL(d[key])
else:
newdict['q_'+key] = None
d.update(newdict)
request.themedict = d
# now call the theming code to do the rendering
if keywords.get('editor_mode', 0):
output.append(self.editorheader(d))
else:
output.append(self.header(d))
# emit it
request.write(''.join(output))
output = []
self._send_title_called = True
#end def send_title
def recentchanges_header(self, d):
""" Override """
html = ThemeBase.recentchanges_header(self,d)
return html.replace('<table>', '<table class="table table-bordered">')
#end def recentchanges_header
def header(self, d, **keywords):
""" Override - Assemble wiki header
@param d: parameter dictionary
@rtype: unicode
@return: page header html
"""
html = [
self.bs_site_header(d),
self.bs_breadcrumb(d),
self.bs_container_start(),
self.bs_msg(d),
self.bs_page_header(d),
]
return '\n'.join(html)
#end def header
editorheader = header
def footer(self, d, **keywords):
""" Override Assemble wiki footer
@param d: parameter dictionary
@keyword ...:...
@rtype: unicode
@return: page footer html
"""
html = [
self.bs_footer(),
self.bs_container_end(),
self.bs_footer_js(),
]
return u'\n'.join(html)
#end def footer
def startPage(self):
""" Override """
return u''
#end def startPage
def endPage(self):
""" Override """
return u''
#end def endPage
def html_head(self, d):
""" Override - Assemble html head
@param d: parameter dictionary
@rtype: unicode
@return: html head
"""
html = [
u'<title>%s</title>' % self.bs_custom_title(d),
self.externalScript('common'),
self.headscript(d), # Should move to separate .js file
self.guiEditorScript(d),
self.bs_html_stylesheets(d),
self.rsslink(d),
self.bs_html5_magic(),
self.bs_google_analytics()
]
return u'\n'.join(html)
#end def html_head
def bs_actions(self, page):
""" Create actions menu list and items data dict
@param page: current page, Page object
@rtype: unicode
@return: actions menu html fragment
"""
html = []
request = self.request
_ = request.getText
rev = request.rev
menu = [
'raw',
'print',
'RenderAsDocbook',
'refresh',
'__separator__',
'SpellCheck',
'LikePages',
'LocalSiteMap',
'__separator__',
'RenamePage',
'CopyPage',
'DeletePage',
'__separator__',
'MyPages',
'SubscribeUser',
'__separator__',
'Despam',
'revert',
'PackagePages',
'SyncPages',
]
# TODO use glyph-icons
titles = {
# action: menu title
'__separator__': '',
'raw': _('Raw Text'),
'print': _('Print View'),
'refresh': _('Delete Cache'),
'SpellCheck': _('Check Spelling'), # rename action!
'RenamePage': _('Rename Page'),
'CopyPage': _('Copy Page'),
'DeletePage': _('Delete Page'),
'LikePages': _('Like Pages'),
'LocalSiteMap': _('Local Site Map'),
'MyPages': _('My Pages'),
'SubscribeUser': _('Subscribe User'),
'Despam': _('Remove Spam'),
'revert': _('Revert to this revision'),
'PackagePages': _('Package Pages'),
'RenderAsDocbook': _('Render as Docbook'),
'SyncPages': _('Sync Pages'),
}
html.append(u'<div class="btn-group">')
html.append(u'<a class="btn btn-mini dropdown-toggle" data-toggle="dropdown">')
html.append(_('More Actions'))
html.append(u'<span class="caret"></span>')
html.append(u'</a>')
html.append(u'<ul class="dropdown-menu">')
option = '<li%(state)s><a href="'
option += self.request.href(page.page_name)
option += u'?action=%(action)s'
if rev:
option += u'&rev=%s' % rev
option += u'">%(title)s</a><li>'
disabled = u' class="disabled"'
separator = u'<li class="divider"></li>'
# Format standard actions
available = get_available_actions(request.cfg, page, request.user)
for action in menu:
data = {'action': action,
'state' : u'',
'title' : titles[action]}
# removes excluded actions from the more actions menu
if action in request.cfg.actions_excluded:
continue
# Enable delete cache only if page can use caching
if action == 'refresh':
if not page.canUseCache():
data['action'] = 'show'
data['state'] = disabled
# revert action enabled only if user can revert
if action == 'revert' and not request.user.may.revert(page.page_name):
data['action'] = 'show'
data['state'] = disabled
# SubscribeUser action enabled only if user has admin rights
if action == 'SubscribeUser' and not request.user.may.admin(page.page_name):
data['action'] = 'show'
data['state'] = disabled
# Despam action enabled only for superusers
if action == 'Despam' and not request.user.isSuperUser():
data['action'] = 'show'
data['state'] = disabled
# Special menu items. Without javascript, executing will
# just return to the page.
if action.startswith('__'):
data['action'] = 'show'
# Actions which are not available for this wiki, user or page
if (action == '__separator__'):
html.append(separator)
continue
if (action[0].isupper() and not action in available):
data['state'] = disabled
html.append(option % data)
# Add custom actions not in the standard menu, except for
# some actions like AttachFile (we have them on top level)
more = [item for item in available if not item in titles and not item in ('AttachFile', )]
more.sort()
if more:
# Add separator
html.append(separator)
# Add more actions (all enabled)
for action in more:
data = {'action': action, 'state': ''}
# Always add spaces: AttachFile -> Attach File
# XXX do not create page just for using split_title -
# creating pages for non-existent does 2 storage lookups
#title = Page(request, action).split_title(force=1)
title = action
# Use translated version if available
data['title'] = _(title)
html.append(option % data)
html.append(u'</ul></div>')
return u'\n'.join(html)
def bs_discussion(self, page):
"""Return a button to the discussion page
"""
_ = self.request.getText
suppl_name = self.request.cfg.supplementation_page_name
suppl_name_full = "%s/%s" % (page.page_name, suppl_name)
return page.link_to(self.request, text=_(suppl_name),
querystr={'action': 'supplementation'},
css_class='btn btn-mini', rel='nofollow')
def bs_edit(self, page):
""" Return an edit button to
If the user want to show both editors, it will display "Edit
(Text)", otherwise as "Edit".
"""
if 'edit' in self.request.cfg.actions_excluded:
return ""
css_class = u'btn btn-mini'
if not (page.isWritable() and
self.request.user.may.write(page.page_name)):
css_class += u' disabled'
_ = self.request.getText
querystr = {'action': 'edit'}
guiworks = self.guiworks(page)
text = _('Edit')
if guiworks:
# 'textonly' will be upgraded dynamically to 'guipossible' by JS
querystr['editor'] = 'textonly'
attrs = {'name': 'editlink',
'rel': 'nofollow',
'css_class': css_class}
else:
querystr['editor'] = 'text'
attrs = {'name': 'texteditlink',
'rel': 'nofollow',
'css_class': css_class}
return page.link_to(self.request, text=text, querystr=querystr, **attrs)
def bs_info(self, page):
""" Return link to page information """
if 'info' in self.request.cfg.actions_excluded:
return ""
_ = self.request.getText
return page.link_to(self.request,
text=_('Info'),
querystr={'action': 'info'},
css_class='btn btn-mini', rel='nofollow')
def bs_subscribe(self, page):
""" Return subscribe/unsubscribe link to valid users
@rtype: unicode
@return: subscribe or unsubscribe link
"""
if not ((self.cfg.mail_enabled or self.cfg.jabber_enabled) and self.request.user.valid):
return ''
_ = self.request.getText
if self.request.user.isSubscribedTo([page.page_name]):
action, text = 'unsubscribe', _("Unsubscribe")
else:
action, text = 'subscribe', _("Subscribe")
if action in self.request.cfg.actions_excluded:
return ""
return page.link_to(self.request, text=text,
querystr={'action': action},
css_class='btn btn-mini', rel='nofollow')
def bs_quicklink(self, page):
""" Return add/remove quicklink link
@rtype: unicode
@return: link to add or remove a quicklink
"""
if not self.request.user.valid:
return ''
_ = self.request.getText
if self.request.user.isQuickLinkedTo([page.page_name]):
action, text = 'quickunlink', _("Remove Link")
else:
action, text = 'quicklink', _("Add Link")
if action in self.request.cfg.actions_excluded:
return ""
return page.link_to(self.request, text=text,
querystr={'action': action},
css_class='btn btn-mini', rel='nofollow')
def bs_attachments(self, page):
""" Return link to page attachments """
if 'AttachFile' in self.request.cfg.actions_excluded:
return ""
_ = self.request.getText
return page.link_to(self.request,
text=_('Attachments'),
querystr={'action': 'AttachFile'},
css_class='btn btn-mini', rel='nofollow')
def disabledEdit(self):
""" Return a disabled edit link """
_ = self.request.getText
return ('<span class="disabled">%s</span>'
% _('Immutable Page'))
def editbarItems(self, page):
""" Return list of items to show on the editbar
This is separate method to make it easy to customize the
editbar in sub classes.
"""
_ = self.request.getText
editbar_actions = []
for editbar_item in self.request.cfg.edit_bar:
if (editbar_item == 'Discussion' and
(self.request.getPragma('supplementation-page', self.request.cfg.supplementation_page)
in (True, 1, 'on', '1'))):
editbar_actions.append(self.bs_discussion(page))
elif editbar_item == 'Comments':
# we just use <a> to get same style as other links, but we add some dummy
# link target to get correct mouseover pointer appearance. return false
# keeps the browser away from jumping to the link target::
editbar_actions.append('<a href="#" class="btn btn-mini toggleCommentsButton" onClick="toggleComments();return false;" style="display:none;">%s</a>' % _('Comments'))
elif editbar_item == 'Edit':
editbar_actions.append(self.bs_edit(page))
elif editbar_item == 'Info':
editbar_actions.append(self.bs_info(page))
elif editbar_item == 'Subscribe':
editbar_actions.append(self.bs_subscribe(page))
elif editbar_item == 'Quicklink':
editbar_actions.append(self.bs_quicklink(page))
elif editbar_item == 'Attachments':
editbar_actions.append(self.bs_attachments(page))
elif editbar_item == 'ActionsMenu':
editbar_actions.append(self.bs_actions(page))
return editbar_actions
def editbar(self, d):
page = d['page']
if not self.shouldShowEditbar(page) or not d['user_valid']:
return u''
html = self._cache.get('editbar')
if html is None:
# Remove empty items and format as list.
# The item for showing inline comments is hidden by default.
# It gets activated through javascript only if inline
# comments exist on the page.
items = []
for item in self.editbarItems(page):
items.append(item)
html = u'<small class="pull-right btn-toolbar">%s</small>\n' % ''.join(items)
self._cache['editbar'] = html
return html
def bs_html_stylesheets(self, d):
""" Assemble html head stylesheet links"""
leave_str = "charset=\"%s\"" % self.stylesheetsCharset
html = self.html_stylesheets(d)
return html.replace(leave_str, "")
def bs_msg(self, d):
""" Assemble the msg display """
msg = self.msg(d)
if msg != u'':
return u'''
<div class="alert">
<button type="button" class="close" data-dismiss="alert">×</button>
'''+ msg + '</div>'
return u''
def bs_custom_title(self, d):
title = self.request.getPragma('title')
if not title:
if d.has_key('title'):
title = d['title']
elif d.has_key('title_text'):
title = d['title_text']
return wikiutil.escape(title)
def bs_container_start(self):
return u'<div class="container">'
def bs_container_end(self):
return u'</div> <!-- // container -->'
def bs_site_header(self, d):
try:
return self.cfg.bs_page_header
except AttributeError:
return ''
def bs_first_header(self):
try:
return self.cfg.bs_top_header
except AttributeError:
return False
def bs_page_header(self, d):
html = []
html.append('<div class="page-header">')
if self.bs_first_header():
if d['page_front_page'] == d['page_name']:
title = self.request.cfg.sitename
else:
title = self.bs_custom_title(d)
html.append("<h1>%s" % title)
html.append(self.editbar(d))
html.append("</h1>")
else:
html.append(self.editbar(d))
html.append('</div>')
return '\n'.join(html)
def username(self, d):
""" Assemble the username / userprefs link
@param d: parameter dictionary
@rtype: unicode
@return: username html
"""
request = self.request
_ = request.getText
userlinks = []
# Add username/homepage link for registered users. We don't care
# if it exists, the user can create it.
if request.user.valid and request.user.name:
interwiki = wikiutil.getInterwikiHomePage(request)
name = request.user.name
aliasname = request.user.aliasname
if not aliasname:
aliasname = name
title = "%s" % aliasname
# link to (interwiki) user homepage
homelink = (request.formatter.interwikilink(1, title=title, id="userhome", generated=True, *interwiki) +
request.formatter.text(name) +
request.formatter.interwikilink(0, title=title, id="userhome", *interwiki))
userlinks.append(homelink)
# link to userprefs action
if 'userprefs' not in self.request.cfg.actions_excluded:
userlinks.append(d['page'].link_to(request, text=_('Settings'),
querystr={'action': 'userprefs'}, id='userprefs', rel='nofollow'))
if request.user.valid:
if request.user.auth_method in request.cfg.auth_can_logout:
userlinks.append(d['page'].link_to(request, text=_('Logout'),
querystr={'action': 'logout', 'logout': 'logout'}, id='logout', rel='nofollow'))
else:
query = {'action': 'login'}
# special direct-login link if the auth methods want no input
if request.cfg.auth_login_inputs == ['special_no_input']:
query['login'] = '1'
if request.cfg.auth_have_login:
userlinks.append(d['page'].link_to(request, text=_("Login"),
querystr=query, id='login', rel='nofollow'))
userlinks = [u'<li>%s' % link for link in userlinks]
links = ' <span class="divider">|</span></li>'.join(userlinks)
links += "%s" % request.cfg
if request.cfg.navi_bar:
links += ' <span class="divider">|</span></li>'
userlinks = []
for text in request.cfg.navi_bar:
pagename, url = self.splitNavilink(text)
userlinks.append(url)
userlinks = [u'<li>%s' % link for link in userlinks]
links += ' <span class="divider">|</span></li>'.join(userlinks)
html = u'<ul>%s</li></ul>' % links
return html
def bs_breadcrumb(self, d):
html = [u'<ul class="breadcrumb">']
try:
_var = self.cfg.bs_breadcrumb
for text, url in self.cfg.bs_breadcrumb:
markup = u'<li><a href="%s">%s</a> <span class="divider">»</span></li>' % (url, text)
html.append(markup)
except AttributeError:
pass
if self.request.action not in [u'show', u'', u'refresh']:
action = self.request.action
else:
action = False
page = wikiutil.getFrontPage(self.request)
frontpage = page.link_to_raw(self.request, text=self.request.cfg.sitename)
html.append(u'<li>%s <span class="divider">»</span></li>' % frontpage)
segments = d['page_name'].split('/')
if action:
segments.append(action)
curpage = ''
for s in segments[:-1]:
curpage += s
html.append(u'<li>%s <span class="divider">»</span></li>' % Page(self.request, curpage).link_to(self.request, s))
curpage += '/'
html.append(u'<li class="active">%s</li>' % segments[-1])
html.append(u'<li class="pull-right">%s</li>' % self.username(d))
html.append(u'</ul>')
return '\n'.join(html)
def bs_footer(self):
html = []
html.append(u'<hr><footer>')
html.append(u'<p class="pull-right"><i class="icon-arrow-up"></i><a href="#">Back to top</a></p>')
try:
html.append(self.cfg.bs_page_footer)
except AttributeError:
pass
html.append(u'</footer>')
return '\n'.join(html)
def bs_footer_js(self):
js_files = ('jquery.min.js', 'bootstrap.js')
html = ''
for js_file in js_files:
src = "%s/%s/js/%s" % (self.request.cfg.url_prefix_static, self.name, js_file)
html += '<script type="text/javascript" src="%s"></script>' % src
return html
def bs_html5_magic(self):
return u'''
<!-- Le HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]>
<script src="//html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->'''
def bs_google_analytics(self):
""" Google Analytics tracking code """
try:
_var = self.cfg.bs_ga_prop_id
except AttributeError:
return ''
if self.cfg.bs_ga_prop_id.startswith('UA-'):
return u'''<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', '%s']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>''' % self.cfg.bs_ga_prop_id
return ''
def execute(request):
"""
Generate and return a theme object
@param request: the request object
@rtype: MoinTheme
@return: Theme object
"""
return Theme(request)
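
# A minimal wikiconfig sketch for the optional settings this theme reads from
# self.cfg; the values shown are placeholders, not defaults.
#
#     class Config(multiconfig.DefaultConfig):
#         theme_default = 'bootstrap'
#         bs_top_header = True                   # render the page title as <h1>
#         bs_page_header = u'<div>...</div>'     # raw HTML above the breadcrumb
#         bs_page_footer = u'<p>...</p>'         # raw HTML inside <footer>
#         bs_breadcrumb = [(u'Docs', u'/docs')]  # extra (text, url) crumbs
#         bs_ga_prop_id = 'UA-XXXXXXXX-1'        # enables the Google Analytics snippet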
| lu-zero/moinmoin-bootstrap | bootstrap.py | Python | gpl-2.0 | 32,867 |
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
import routes
from manila.api.middleware import fault
from manila.api.openstack import wsgi
from manila.openstack.common import log as logging
from manila import utils
from manila import wsgi as base_wsgi
LOG = logging.getLogger(__name__)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
        if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self,
member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):
"""
Routes requests on the OpenStack API to the appropriate controller
and method.
"""
ExtensionManager = None # override in subclasses
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`manila.wsgi.Router` doesn't have"""
return cls()
def __init__(self, ext_mgr=None):
if ext_mgr is None:
if self.ExtensionManager:
ext_mgr = self.ExtensionManager()
else:
raise Exception(_("Must specify an ExtensionManager class"))
mapper = ProjectMapper()
self.resources = {}
self._setup_routes(mapper, ext_mgr)
self._setup_ext_routes(mapper, ext_mgr)
self._setup_extensions(ext_mgr)
super(APIRouter, self).__init__(mapper)
def _setup_ext_routes(self, mapper, ext_mgr):
for resource in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'),
resource.collection)
wsgi_resource = wsgi.Resource(resource.controller)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
mapper.resource(resource.collection, resource.collection, **kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _setup_extensions(self, ext_mgr):
for extension in ext_mgr.get_controller_extensions():
ext_name = extension.extension.name
collection = extension.collection
controller = extension.controller
if collection not in self.resources:
LOG.warning(_('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource') %
locals())
continue
LOG.debug(_('Extension %(ext_name)s extending resource: '
'%(collection)s') % locals())
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
def _setup_routes(self, mapper, ext_mgr):
raise NotImplementedError
class FaultWrapper(fault.FaultWrapper):
def __init__(self, application):
LOG.warn(_('manila.api.openstack:FaultWrapper is deprecated. Please '
'use manila.api.middleware.fault:FaultWrapper instead.'))
super(FaultWrapper, self).__init__(application)
| tucbill/manila | manila/api/openstack/__init__.py | Python | apache-2.0 | 4,673 |
from django.conf.urls.defaults import *
from django.contrib import admin
from ummeli.vlive import views
from django.views.generic.simple import redirect_to
admin.autodiscover()
urlpatterns = patterns('',
# url(r'^ummeli/', include('ummeli.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/None/$', redirect_to, {'url': '/admin/'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^health/$', views.health, name="health"),
url(r'^stats/$', views.stats, name="stats"),
url(r'^geckoboard/', include('jmbodashboard.geckoboard.urls')),
url(r'^register/$', views.mobi_register, name='register'),
url(r'', include('ummeli.vlive.urls')),
)
| praekelt/ummeli | ummeli/mobi_urls.py | Python | bsd-3-clause | 839 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class product_product(osv.osv):
_inherit = "product.product"
def _stock_move_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict([(id, {'reception_count': 0, 'delivery_count': 0}) for id in ids])
move_pool=self.pool.get('stock.move')
moves = move_pool.read_group(cr, uid, [
('product_id', 'in', ids),
('picking_id.type', '=', 'in'),
('state','in',('confirmed','assigned','pending'))
], ['product_id'], ['product_id'])
for move in moves:
product_id = move['product_id'][0]
res[product_id]['reception_count'] = move['product_id_count']
moves = move_pool.read_group(cr, uid, [
('product_id', 'in', ids),
('picking_id.type', '=', 'out'),
('state','in',('confirmed','assigned','pending'))
], ['product_id'], ['product_id'])
for move in moves:
product_id = move['product_id'][0]
res[product_id]['delivery_count'] = move['product_id_count']
return res
def get_product_accounts(self, cr, uid, product_id, context=None):
""" To get the stock input account, stock output account and stock journal related to product.
@param product_id: product id
@return: dictionary which contains information regarding stock input account, stock output account and stock journal
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
stock_input_acc = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id or False
if not stock_input_acc:
stock_input_acc = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id or False
stock_output_acc = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id or False
if not stock_output_acc:
stock_output_acc = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id or False
journal_id = product_obj.categ_id.property_stock_journal and product_obj.categ_id.property_stock_journal.id or False
account_valuation = product_obj.categ_id.property_stock_valuation_account_id and product_obj.categ_id.property_stock_valuation_account_id.id or False
return {
'stock_account_input': stock_input_acc,
'stock_account_output': stock_output_acc,
'stock_journal': journal_id,
'property_stock_valuation_account_id': account_valuation
}
def do_change_standard_price(self, cr, uid, ids, datas, context=None):
""" Changes the Standard Price of Product and creates an account move accordingly.
@param datas : dict. contain default datas like new_price, stock_output_account, stock_input_account, stock_journal
@param context: A standard dictionary
@return:
"""
location_obj = self.pool.get('stock.location')
move_obj = self.pool.get('account.move')
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
new_price = datas.get('new_price', 0.0)
stock_output_acc = datas.get('stock_output_account', False)
stock_input_acc = datas.get('stock_input_account', False)
journal_id = datas.get('stock_journal', False)
move_ids = []
loc_ids = location_obj.search(cr, uid,[('usage','=','internal')])
for product in self.browse(cr, uid, ids, context=context):
if product.valuation != 'real_time':
continue
account_valuation = product.categ_id.property_stock_valuation_account_id
account_valuation_id = account_valuation and account_valuation.id or False
if not account_valuation_id: raise osv.except_osv(_('Error!'), _('Specify valuation Account for Product Category: %s.') % (product.categ_id.name))
for location in location_obj.browse(cr, uid, loc_ids, context=context):
c = context.copy()
c.update({
'location': location.id,
'compute_child': False
})
qty = product.qty_available
diff = product.standard_price - new_price
if not diff: raise osv.except_osv(_('Error!'), _("No difference between standard price and new price!"))
if qty:
company_id = location.company_id and location.company_id.id or False
if not company_id: raise osv.except_osv(_('Error!'), _('Please specify company in Location.'))
#
# Accounting Entries
#
if not journal_id:
journal_id = product.categ_id.property_stock_journal and product.categ_id.property_stock_journal.id or False
if not journal_id:
raise osv.except_osv(_('Error!'),
_('Please define journal '\
'on the product category: "%s" (id: %d).') % \
(product.categ_id.name,
product.categ_id.id,))
move_id = move_obj.create(cr, uid, {
'journal_id': journal_id,
'company_id': company_id
})
move_ids.append(move_id)
if diff > 0:
if not stock_input_acc:
stock_input_acc = product.\
property_stock_account_input.id
if not stock_input_acc:
stock_input_acc = product.categ_id.\
property_stock_account_input_categ.id
if not stock_input_acc:
raise osv.except_osv(_('Error!'),
_('Please define stock input account ' \
'for this product: "%s" (id: %d).') % \
(product.name,
product.id,))
amount_diff = qty * diff
move_line_obj.create(cr, uid, {
'name': product.name,
'account_id': stock_input_acc,
'debit': amount_diff,
'move_id': move_id,
})
move_line_obj.create(cr, uid, {
'name': product.categ_id.name,
'account_id': account_valuation_id,
'credit': amount_diff,
'move_id': move_id
})
elif diff < 0:
if not stock_output_acc:
stock_output_acc = product.\
property_stock_account_output.id
if not stock_output_acc:
stock_output_acc = product.categ_id.\
property_stock_account_output_categ.id
if not stock_output_acc:
raise osv.except_osv(_('Error!'),
_('Please define stock output account ' \
'for this product: "%s" (id: %d).') % \
(product.name,
product.id,))
amount_diff = qty * -diff
move_line_obj.create(cr, uid, {
'name': product.name,
'account_id': stock_output_acc,
'credit': amount_diff,
'move_id': move_id
})
move_line_obj.create(cr, uid, {
'name': product.categ_id.name,
'account_id': account_valuation_id,
'debit': amount_diff,
'move_id': move_id
})
self.write(cr, uid, ids, {'standard_price': new_price})
return move_ids
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, user, view_id, view_type, context)
if res: return res
if (context.get('active_id', False)) and (context.get('active_model') == 'stock.location'):
return _('Products: ')+self.pool.get('stock.location').browse(cr, user, context['active_id'], context).name
return res
def get_product_available(self, cr, uid, ids, context=None):
""" Finds whether product is available or not in particular warehouse.
@return: Dictionary of values
"""
if context is None:
context = {}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
shop_obj = self.pool.get('sale.shop')
states = context.get('states',[])
what = context.get('what',())
if not ids:
ids = self.search(cr, uid, [])
res = {}.fromkeys(ids, 0.0)
if not ids:
return res
if context.get('shop', False):
warehouse_id = shop_obj.read(cr, uid, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]
if warehouse_id:
context['warehouse'] = warehouse_id
if context.get('warehouse', False):
lot_id = warehouse_obj.read(cr, uid, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]
if lot_id:
context['location'] = lot_id
if context.get('location', False):
if type(context['location']) == type(1):
location_ids = [context['location']]
elif type(context['location']) in (type(''), type(u'')):
location_ids = location_obj.search(cr, uid, [('name','ilike',context['location'])], context=context)
else:
location_ids = context['location']
else:
location_ids = []
wids = warehouse_obj.search(cr, uid, [], context=context)
if not wids:
return res
for w in warehouse_obj.browse(cr, uid, wids, context=context):
location_ids.append(w.lot_stock_id.id)
# build the list of ids of children of the location given by id
if context.get('compute_child',True):
child_location_ids = location_obj.search(cr, uid, [('location_id', 'child_of', location_ids)])
location_ids = child_location_ids or location_ids
# this will be a dictionary of the product UoM by product id
product2uom = {}
uom_ids = []
for product in self.read(cr, uid, ids, ['uom_id'], context=context):
product2uom[product['id']] = product['uom_id'][0]
uom_ids.append(product['uom_id'][0])
# this will be a dictionary of the UoM resources we need for conversion purposes, by UoM id
uoms_o = {}
for uom in self.pool.get('product.uom').browse(cr, uid, uom_ids, context=context):
uoms_o[uom.id] = uom
results = []
results2 = []
from_date = context.get('from_date',False)
to_date = context.get('to_date',False)
date_str = False
date_values = False
where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]
if from_date and to_date:
date_str = "date>=%s and date<=%s"
where.append(tuple([from_date]))
where.append(tuple([to_date]))
elif from_date:
date_str = "date>=%s"
date_values = [from_date]
elif to_date:
date_str = "date<=%s"
date_values = [to_date]
if date_values:
where.append(tuple(date_values))
prodlot_id = context.get('prodlot_id', False)
prodlot_clause = ''
if prodlot_id:
prodlot_clause = ' and prodlot_id = %s '
where += [prodlot_id]
# TODO: perhaps merge in one query.
if 'in' in what:
# all moves from a location out of the set to a location in the set
cr.execute(
'select sum(product_qty), product_id, product_uom '\
'from stock_move '\
'where location_id NOT IN %s '\
'and location_dest_id IN %s '\
'and product_id IN %s '\
'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\
+ prodlot_clause +
'group by product_id,product_uom',tuple(where))
results = cr.fetchall()
if 'out' in what:
# all moves from a location in the set to a location out of the set
cr.execute(
'select sum(product_qty), product_id, product_uom '\
'from stock_move '\
'where location_id IN %s '\
'and location_dest_id NOT IN %s '\
'and product_id IN %s '\
'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' '\
+ prodlot_clause +
'group by product_id,product_uom',tuple(where))
results2 = cr.fetchall()
# Get the missing UoM resources
uom_obj = self.pool.get('product.uom')
uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)
if context.get('uom', False):
uoms += [context['uom']]
uoms = filter(lambda x: x not in uoms_o.keys(), uoms)
if uoms:
uoms = uom_obj.browse(cr, uid, list(set(uoms)), context=context)
for o in uoms:
uoms_o[o.id] = o
#TOCHECK: before change uom of product, stock move line are in old uom.
context.update({'raise-exception': False})
# Count the incoming quantities
for amount, prod_id, prod_uom in results:
amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,
uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)
res[prod_id] += amount
# Count the outgoing quantities
for amount, prod_id, prod_uom in results2:
amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,
uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)
res[prod_id] -= amount
return res
def _product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):
""" Finds the incoming and outgoing quantity of product.
@return: Dictionary of values
"""
if not field_names:
field_names = []
if context is None:
context = {}
res = {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0.0)
for f in field_names:
c = context.copy()
if f == 'qty_available':
c.update({ 'states': ('done',), 'what': ('in', 'out') })
if f == 'virtual_available':
c.update({ 'states': ('confirmed','waiting','assigned','done'), 'what': ('in', 'out') })
if f == 'incoming_qty':
c.update({ 'states': ('confirmed','waiting','assigned'), 'what': ('in',) })
if f == 'outgoing_qty':
c.update({ 'states': ('confirmed','waiting','assigned'), 'what': ('out',) })
stock = self.get_product_available(cr, uid, ids, context=c)
for id in ids:
res[id][f] = stock.get(id, 0.0)
return res
_columns = {
'reception_count': fields.function(_stock_move_count, string="Reception", type='integer', multi='pickings'),
'delivery_count': fields.function(_stock_move_count, string="Delivery", type='integer', multi='pickings'),
'qty_available': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Quantity On Hand',
help="Current quantity of products.\n"
"In a context with a single Stock Location, this includes "
"goods stored at this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, or any "
"of its children.\n"
"In a context with a single Shop, this includes goods "
"stored in the Stock Location of the Warehouse of this Shop, "
"or any of its children.\n"
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."),
'virtual_available': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Forecasted Quantity',
help="Forecast quantity (computed as Quantity On Hand "
"- Outgoing + Incoming)\n"
"In a context with a single Stock Location, this includes "
"goods stored in this location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, or any "
"of its children.\n"
"In a context with a single Shop, this includes goods "
"stored in the Stock Location of the Warehouse of this Shop, "
"or any of its children.\n"
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."),
'incoming_qty': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Incoming',
help="Quantity of products that are planned to arrive.\n"
"In a context with a single Stock Location, this includes "
"goods arriving to this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods arriving to the Stock Location of this Warehouse, or "
"any of its children.\n"
"In a context with a single Shop, this includes goods "
"arriving to the Stock Location of the Warehouse of this "
"Shop, or any of its children.\n"
"Otherwise, this includes goods arriving to any Stock "
"Location with 'internal' type."),
'outgoing_qty': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Outgoing',
help="Quantity of products that are planned to leave.\n"
"In a context with a single Stock Location, this includes "
"goods leaving this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods leaving the Stock Location of this Warehouse, or "
"any of its children.\n"
"In a context with a single Shop, this includes goods "
"leaving the Stock Location of the Warehouse of this "
"Shop, or any of its children.\n"
"Otherwise, this includes goods leaving any Stock "
"Location with 'internal' type."),
'track_production': fields.boolean('Track Manufacturing Lots', help="Forces to specify a Serial Number for all moves containing this product and generated by a Manufacturing Order"),
'track_incoming': fields.boolean('Track Incoming Lots', help="Forces to specify a Serial Number for all moves containing this product and coming from a Supplier Location"),
'track_outgoing': fields.boolean('Track Outgoing Lots', help="Forces to specify a Serial Number for all moves containing this product and going to a Customer Location"),
'location_id': fields.dummy(string='Location', relation='stock.location', type='many2one'),
'warehouse_id': fields.dummy(string='Warehouse', relation='stock.warehouse', type='many2one'),
'valuation':fields.selection([('manual_periodic', 'Periodical (manual)'),
('real_time','Real Time (automated)'),], 'Inventory Valuation',
help="If real-time valuation is enabled for a product, the system will automatically write journal entries corresponding to stock moves." \
"The inventory variation account set on the product category will represent the current inventory value, and the stock input and stock output account will hold the counterpart moves for incoming and outgoing products."
, required=True),
}
_defaults = {
'valuation': 'manual_periodic',
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(product_product,self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
if context is None:
context = {}
if ('location' in context) and context['location']:
location_info = self.pool.get('stock.location').browse(cr, uid, context['location'])
fields=res.get('fields',{})
if fields:
if location_info.usage == 'supplier':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Receptions')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Received Qty')
if location_info.usage == 'internal':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Stock')
if location_info.usage == 'customer':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Deliveries')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Delivered Qty')
if location_info.usage == 'inventory':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future P&L')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('P&L Qty')
if location_info.usage == 'procurement':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Qty')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Unplanned Qty')
if location_info.usage == 'production':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Productions')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Produced Qty')
return res
product_product()
class product_template(osv.osv):
_name = 'product.template'
_inherit = 'product.template'
_columns = {
'property_stock_procurement': fields.property(
'stock.location',
type='many2one',
relation='stock.location',
string="Procurement Location",
view_load=True,
domain=[('usage','like','procurement')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated by procurements."),
'property_stock_production': fields.property(
'stock.location',
type='many2one',
relation='stock.location',
string="Production Location",
view_load=True,
domain=[('usage','like','production')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated by manufacturing orders."),
'property_stock_inventory': fields.property(
'stock.location',
type='many2one',
relation='stock.location',
string="Inventory Location",
view_load=True,
domain=[('usage','like','inventory')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated when you do an inventory."),
'property_stock_account_input': fields.property('account.account',
type='many2one', relation='account.account',
string='Stock Input Account', view_load=True,
help="When doing real-time inventory valuation, counterpart journal items for all incoming stock moves will be posted in this account, unless "
"there is a specific valuation account set on the source location. When not set on the product, the one from the product category is used."),
'property_stock_account_output': fields.property('account.account',
type='many2one', relation='account.account',
string='Stock Output Account', view_load=True,
help="When doing real-time inventory valuation, counterpart journal items for all outgoing stock moves will be posted in this account, unless "
"there is a specific valuation account set on the destination location. When not set on the product, the one from the product category is used."),
'sale_delay': fields.float('Customer Lead Time', help="The average delay in days between the confirmation of the customer order and the delivery of the finished products. It's the time you promise to your customers."),
'loc_rack': fields.char('Rack', size=16),
'loc_row': fields.char('Row', size=16),
'loc_case': fields.char('Case', size=16),
}
_defaults = {
'sale_delay': 7,
}
product_template()
class product_category(osv.osv):
_inherit = 'product.category'
_columns = {
'property_stock_journal': fields.property('account.journal',
relation='account.journal', type='many2one',
string='Stock Journal', view_load=True,
help="When doing real-time inventory valuation, this is the Accounting Journal in which entries will be automatically posted when stock moves are processed."),
'property_stock_account_input_categ': fields.property('account.account',
type='many2one', relation='account.account',
string='Stock Input Account', view_load=True,
help="When doing real-time inventory valuation, counterpart journal items for all incoming stock moves will be posted in this account, unless "
"there is a specific valuation account set on the source location. This is the default value for all products in this category. It "
"can also directly be set on each product"),
'property_stock_account_output_categ': fields.property('account.account',
type='many2one', relation='account.account',
string='Stock Output Account', view_load=True,
help="When doing real-time inventory valuation, counterpart journal items for all outgoing stock moves will be posted in this account, unless "
"there is a specific valuation account set on the destination location. This is the default value for all products in this category. It "
"can also directly be set on each product"),
'property_stock_valuation_account_id': fields.property('account.account',
type='many2one',
relation='account.account',
string="Stock Valuation Account",
view_load=True,
help="When real-time inventory valuation is enabled on a product, this account will hold the current value of the products.",),
}
product_category()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| kevin8909/xjerp | openerp/addons/stock/product.py | Python | agpl-3.0 | 30,499 |
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
from .library import *
from .array import *
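# These wrappers share one pattern: when `dim` is given the reduction runs
# along that axis and an af Array is returned; with dim=None the whole array
# is reduced and a plain Python scalar is returned (a complex value when the
# imaginary part is non-zero).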
def mean(a, weights=None, dim=None):
if dim is not None:
out = Array()
if weights is None:
safe_call(backend.get().af_mean(ct.pointer(out.arr), a.arr, ct.c_int(dim)))
else:
safe_call(backend.get().af_mean_weighted(ct.pointer(out.arr), a.arr, weights.arr, ct.c_int(dim)))
return out
else:
real = ct.c_double(0)
imag = ct.c_double(0)
if weights is None:
safe_call(backend.get().af_mean_all(ct.pointer(real), ct.pointer(imag), a.arr))
else:
safe_call(backend.get().af_mean_all_weighted(ct.pointer(real), ct.pointer(imag), a.arr, weights.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def var(a, isbiased=False, weights=None, dim=None):
if dim is not None:
out = Array()
if weights is None:
safe_call(backend.get().af_var(ct.pointer(out.arr), a.arr, isbiased, ct.c_int(dim)))
else:
safe_call(backend.get().af_var_weighted(ct.pointer(out.arr), a.arr, weights.arr, ct.c_int(dim)))
return out
else:
real = ct.c_double(0)
imag = ct.c_double(0)
if weights is None:
safe_call(backend.get().af_var_all(ct.pointer(real), ct.pointer(imag), a.arr, isbiased))
else:
safe_call(backend.get().af_var_all_weighted(ct.pointer(real), ct.pointer(imag), a.arr, weights.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def stdev(a, dim=None):
if dim is not None:
out = Array()
safe_call(backend.get().af_stdev(ct.pointer(out.arr), a.arr, ct.c_int(dim)))
return out
else:
real = ct.c_double(0)
imag = ct.c_double(0)
safe_call(backend.get().af_stdev_all(ct.pointer(real), ct.pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def cov(a, isbiased=False, dim=None):
if dim is not None:
out = Array()
safe_call(backend.get().af_cov(ct.pointer(out.arr), a.arr, isbiased, ct.c_int(dim)))
return out
else:
real = ct.c_double(0)
imag = ct.c_double(0)
safe_call(backend.get().af_cov_all(ct.pointer(real), ct.pointer(imag), a.arr, isbiased))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def median(a, dim=None):
if dim is not None:
out = Array()
safe_call(backend.get().af_median(ct.pointer(out.arr), a.arr, ct.c_int(dim)))
return out
else:
real = ct.c_double(0)
imag = ct.c_double(0)
safe_call(backend.get().af_median_all(ct.pointer(real), ct.pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def corrcoef(x, y):
real = ct.c_double(0)
imag = ct.c_double(0)
safe_call(backend.get().af_corrcoef(ct.pointer(real), ct.pointer(imag), x.arr, y.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
| syurkevi/arrayfire-python | arrayfire/statistics.py | Python | bsd-3-clause | 3,560 |
#!/usr/bin/env python
# encoding: utf-8
"""
problem_22.py
Created by James Jones on 2015-02-04.
Problem 22 from https://projecteuler.net/problem=22
Description:
Using names.txt, a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.
For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 = 49714.
What is the total of all the name scores in the file?
Sample Output:
871198282
Reason for picking problem: I have a secret love for data processing.
References:
Time taken: 30 minutes
"""
names = []
def loadNames():
global names
#open file
f = open("names.txt",'r')
#split each name off and remove quotes
for x in f:
for y in x.split(","):
y = y.replace('"','')
names.append(y)
names.sort()
def main():
loadNames()
    # total value
tv = 0
    # using enumerate to get both the index and the value
for i,v in enumerate(names):
#setting place value
pv = i + 1
#initializing name value
nv = 0
#converting name to list for easier calculating
name = list(v)
for x in name:
nv += ord(x) - 64
tv += nv * pv
print tv
if __name__ == '__main__':
main()
| outcast/projecteuler | 22/problem_22.py | Python | unlicense | 1,417 |
# coding=utf-8
from fsfunc import fs
from kernel.filesystem import root_iNode_name
from utils.decorator.synchronizer import syncClassBase,sync,sync_
class session(syncClassBase):
    '''A wrapper for fsfunc that records the working directory'''
def __init__(self,io):
syncClassBase.__init__(self)
self.fs=fs(io)
self.d=root_iNode_name
@sync
def cd(self,path):
self.d=self.fs.locate(path,self.d)
def ls(self):
return self.fs.list(self.d)
def mkdir(self,foldername):
self.fs.mkdir(foldername,self.d)
def rm(self,foldername):
self.fs.rm(foldername,self.d)
from kernel.distributedvc.demonoupload import demoio
this=session(demoio)
| levythu/swift-layerC | kernel/filesystem/session.py | Python | gpl-2.0 | 710 |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Copyright (c) 2010 Chris AtLee
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Streaming HTTP uploads module.
This module extends the standard httplib and urllib2 objects so that
iterable objects can be used in the body of HTTP requests.
In most cases all one should have to do is call :func:`register_openers()`
to register the new streaming http handlers which will take priority over
the default handlers, and then you can use iterable objects in the body
of HTTP requests.
**N.B.** You must specify a Content-Length header if using an iterable object
since there is no way to determine in advance the total size that will be
yielded, and there is no way to reset an iterator.
Example usage:
>>> from StringIO import StringIO
>>> import urllib2, poster.streaminghttp
>>> opener = poster.streaminghttp.register_openers()
>>> s = "Test file data"
>>> f = StringIO(s)
>>> req = urllib2.Request("http://localhost:5000", f,
... {'Content-Length': str(len(s))})
"""
from __future__ import division
import httplib, urllib2, socket
from httplib import NotConnected
__all__ = ['StreamingHTTPConnection', 'StreamingHTTPRedirectHandler',
'StreamingHTTPHandler', 'register_openers']
if hasattr(httplib, 'HTTPS'):
__all__.extend(['StreamingHTTPSHandler', 'StreamingHTTPSConnection'])
class _StreamingHTTPMixin(object):
"""Mixin class for HTTP and HTTPS connections that implements a streaming
send method."""
def send(self, value):
"""Send ``value`` to the server.
``value`` can be a string object, a file-like object that supports
a .read() method, or an iterable object that supports a .next()
method.
"""
# Based on python 2.6's httplib.HTTPConnection.send()
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
# send the data to the server. if we get a broken pipe, then close
# the socket. we want to reconnect when somebody tries to send again.
#
# NOTE: we DO propagate the error, though, because we cannot simply
# ignore the error... the caller will know if they can retry.
if self.debuglevel > 0:
print "send:", repr(value)
try:
blocksize = 8192
if hasattr(value, 'read') :
if hasattr(value, 'seek'):
value.seek(0)
if self.debuglevel > 0:
print "sendIng a read()able"
data = value.read(blocksize)
while data:
self.sock.sendall(data)
data = value.read(blocksize)
elif hasattr(value, 'next'):
if hasattr(value, 'reset'):
value.reset()
if self.debuglevel > 0:
print "sendIng an iterable"
for data in value:
self.sock.sendall(data)
else:
self.sock.sendall(value)
except socket.error, v:
if v[0] == 32: # Broken pipe
self.close()
raise
class StreamingHTTPConnection(_StreamingHTTPMixin, httplib.HTTPConnection):
"""Subclass of `httplib.HTTPConnection` that overrides the `send()` method
to support iterable body objects"""
class StreamingHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
"""Subclass of `urllib2.HTTPRedirectHandler` that overrides the
`redirect_request` method to properly handle redirected POST requests
This class is required because python 2.5's HTTPRedirectHandler does
not remove the Content-Type or Content-Length headers when requesting
the new resource, but the body of the original request is not preserved.
"""
handler_order = urllib2.HTTPRedirectHandler.handler_order - 1
# From python2.6 urllib2's HTTPRedirectHandler
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in (
"content-length", "content-type")
)
return urllib2.Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
class StreamingHTTPHandler(urllib2.HTTPHandler):
"""Subclass of `urllib2.HTTPHandler` that uses
StreamingHTTPConnection as its http connection class."""
handler_order = urllib2.HTTPHandler.handler_order - 1
def http_open(self, req):
"""Open a StreamingHTTPConnection for the given request"""
return self.do_open(StreamingHTTPConnection, req)
def http_request(self, req):
"""Handle a HTTP request. Make sure that Content-Length is specified
if we're using an interable value"""
# Make sure that if we're using an iterable object as the request
# body, that we've also specified Content-Length
if req.has_data():
data = req.get_data()
if hasattr(data, 'read') or hasattr(data, 'next'):
if not req.has_header('Content-length'):
raise ValueError(
"No Content-Length specified for iterable body")
return urllib2.HTTPHandler.do_request_(self, req)
if hasattr(httplib, 'HTTPS'):
class StreamingHTTPSConnection(_StreamingHTTPMixin,
httplib.HTTPSConnection):
"""Subclass of `httplib.HTTSConnection` that overrides the `send()`
method to support iterable body objects"""
class StreamingHTTPSHandler(urllib2.HTTPSHandler):
"""Subclass of `urllib2.HTTPSHandler` that uses
StreamingHTTPSConnection as its http connection class."""
handler_order = urllib2.HTTPSHandler.handler_order - 1
def https_open(self, req):
return self.do_open(StreamingHTTPSConnection, req)
def https_request(self, req):
# Make sure that if we're using an iterable object as the request
# body, that we've also specified Content-Length
if req.has_data():
data = req.get_data()
if hasattr(data, 'read') or hasattr(data, 'next'):
if not req.has_header('Content-length'):
raise ValueError(
"No Content-Length specified for iterable body")
return urllib2.HTTPSHandler.do_request_(self, req)
def register_openers(cookiejar=None):
"""Register the streaming http handlers in the global urllib2 default
opener object.
Returns the created OpenerDirector object."""
handlers = [StreamingHTTPHandler, StreamingHTTPRedirectHandler]
if hasattr(httplib, "HTTPS"):
handlers.append(StreamingHTTPSHandler)
if cookiejar:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar), *handlers)
else:
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)
return opener
| hjanime/VisTrails | vistrails/core/repository/poster/streaminghttp.py | Python | bsd-3-clause | 11,197 |
def test_python2_is_installed(host):
python_file = host.file("/usr/bin/python2")
assert python_file.exists
| samvarankashyap/linch-pin | docs/source/examples/workspaces/testinfra/hooks/testinfra/test_python.py | Python | gpl-3.0 | 115 |
import hashlib
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.patches import Rectangle
import os
import shutil
import tempfile
from sar_parser import SarParser
# If there are more than 50 plots in a graph we move the legend to the
# bottom
LEGEND_THRESHOLD = 50
def ascii_date(d):
return "%s" % (d.strftime("%Y-%m-%d %H:%M"))
class SarGrapher(object):
def __init__(self, filenames, starttime=None, endtime=None):
"""Initializes the class, creates a SarParser class
given a list of files and also parsers the files"""
# Temporary dir where images are stored (one per graph)
# NB: This is done to keep the memory usage constant
# in spite of being a bit slower (before this change
# we could use > 12GB RAM for a simple sar file -
# matplotlib is simply inefficient in this area)
self._tempdir = tempfile.mkdtemp(prefix='sargrapher')
self.sar_parser = SarParser(filenames, starttime, endtime)
self.sar_parser.parse()
duplicate_timestamps = self.sar_parser._duplicate_timestamps
if duplicate_timestamps:
print("There are {0} lines with duplicate timestamps. First 10"
"line numbers at {1}".format(
len(duplicate_timestamps.keys()),
sorted(list(duplicate_timestamps.keys()))[:10]))
def _graph_filename(self, graph, extension='.png'):
"""Creates a unique constant file name given a graph or graph list"""
if isinstance(graph, list):
temp = "_".join(graph)
else:
temp = graph
temp = temp.replace('%', '_')
temp = temp.replace('/', '_')
digest = hashlib.sha1()
digest.update(temp.encode('utf-8'))
fname = os.path.join(self._tempdir, digest.hexdigest() + extension)
return fname
def datasets(self):
"""Returns a list of all the available datasets"""
return self.sar_parser.available_data_types()
def timestamps(self):
"""Returns a list of all the available datasets"""
return sorted(self.sar_parser.available_timestamps())
def plot_datasets(self, data, fname, extra_labels, showreboots=False,
output='pdf'):
""" Plot timeseries data (of type dataname). The data can be either
simple (one or no datapoint at any point in time, or indexed (by
indextype). dataname is assumed to be in the form of [title, [label1,
label2, ...], [data1, data2, ...]] extra_labels is a list of tuples
[(datetime, 'label'), ...] """
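        # Hedged example of the `data` argument as this method actually
        # unpacks it below (dataset names are illustrative):
        #   data = (['CPU usage', '%', ['user', 'system']],   # title, unit, axis labels
        #           ['%user', '%system'])                     # keys into sar_parser._data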
sar_parser = self.sar_parser
title = data[0][0]
unit = data[0][1]
axis_labels = data[0][2]
datanames = data[1]
if not isinstance(datanames, list):
raise Exception("plottimeseries expects a list of datanames: %s" %
data)
fig = plt.figure(figsize=(10.5, 6.5))
axes = fig.add_subplot(111)
axes.set_title('{0} time series'.format(title), fontsize=12)
axes.set_xlabel('Time')
axes.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
# Twenty minutes. Could probably make it a parameter
axes.xaxis.set_minor_locator(mdates.MinuteLocator(interval=20))
fig.autofmt_xdate()
ylabel = title
if unit:
ylabel += " - " + unit
axes.set_ylabel(ylabel)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes.yaxis.set_major_formatter(y_formatter)
axes.yaxis.get_major_formatter().set_scientific(False)
color_norm = colors.Normalize(vmin=0, vmax=len(datanames) - 1)
scalar_map = cm.ScalarMappable(norm=color_norm,
cmap=plt.get_cmap('Set1'))
timestamps = self.timestamps()
counter = 0
for i in datanames:
try:
dataset = [sar_parser._data[d][i] for d in timestamps]
except:
print("Key {0} does not exist in this graph".format(i))
raise
axes.plot(timestamps, dataset, 'o:', label=axis_labels[counter],
color=scalar_map.to_rgba(counter))
counter += 1
# Draw extra_labels
if extra_labels:
for extra in extra_labels:
axes.annotate(extra[1], xy=(mdates.date2num(extra[0]),
sar_parser.find_max(extra[0], datanames)),
xycoords='data', xytext=(30, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2"))
# If we have a sosreport draw the reboots
if showreboots and sar_parser.sosreport is not None and \
sar_parser.sosreport.reboots is not None:
reboots = sar_parser.sosreport.reboots
for reboot in reboots.keys():
reboot_date = reboots[reboot]['date']
rboot_x = mdates.date2num(reboot_date)
(xmin, xmax) = plt.xlim()
(ymin, ymax) = plt.ylim()
if rboot_x < xmin or rboot_x > xmax:
continue
axes.annotate('', xy=(mdates.date2num(reboot_date), ymin),
xycoords='data', xytext=(-30, -30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->", color='blue',
connectionstyle="arc3,rad=-0.1"))
# Show any data collection gaps in the graph
gaps = sar_parser.find_data_gaps()
if len(gaps) > 0:
for i in gaps:
(g1, g2) = i
x1 = mdates.date2num(g1)
x2 = mdates.date2num(g2)
(ymin, ymax) = plt.ylim()
axes.add_patch(Rectangle((x1, ymin), x2 - x1,
ymax - ymin, facecolor="lightgrey"))
# Add a grid to the graph to ease visualization
axes.grid(True)
lgd = None
# Draw the legend only when needed
if len(datanames) > 1 or \
(len(datanames) == 1 and len(datanames[0].split('#')) > 1):
# We want the legends box roughly square shaped
# and not take up too much room
props = matplotlib.font_manager.FontProperties(size='xx-small')
if len(datanames) < LEGEND_THRESHOLD:
cols = int((len(datanames) ** 0.5))
lgd = axes.legend(loc=1, ncol=cols, shadow=True, prop=props)
else:
cols = int(len(datanames) ** 0.6)
lgd = axes.legend(loc=9, ncol=cols,
bbox_to_anchor=(0.5, -0.29),
shadow=True, prop=props)
if len(datanames) == 0:
return None
try:
if lgd:
plt.savefig(fname, bbox_extra_artists=(lgd,),
bbox_inches='tight')
else:
plt.savefig(fname, bbox_inches='tight')
except:
import traceback
print(traceback.format_exc())
import sys
sys.exit(-1)
plt.cla()
plt.clf()
plt.close('all')
def plot_svg(self, graphs, output, labels):
"""Given a list of graphs, output an svg file per graph.
Input is a list of strings. A graph with multiple datasets
is a string with datasets separated by comma"""
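        # e.g. graphs=['ldavg-1', '%user,%system'] would emit one SVG for the
        # load average and one combined SVG for both CPU datasets (the dataset
        # names here are illustrative, not taken from the original code).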
if output == 'out.pdf':
output = 'graph'
counter = 1
fnames = []
for i in graphs:
subgraphs = i.split(',')
fname = self._graph_filename(subgraphs, '.svg')
fnames.append(fname)
self.plot_datasets((['', None, subgraphs], subgraphs), fname,
labels)
dest = os.path.join(os.getcwd(), "{0}{1}.svg".format(
output, counter))
shutil.move(fname, dest)
print("Created: {0}".format(dest))
counter += 1
# removes all temporary files and directories
self.close()
def plot_ascii(self, graphs, def_columns=80, def_rows=25):
"""Displays a single graph in ASCII form on the terminal"""
import subprocess
sar_parser = self.sar_parser
timestamps = self.timestamps()
        try:
            # stty reports strings; cast to int so the comparison below behaves
            rows, columns = os.popen('stty size', 'r').read().split()
            rows, columns = int(rows), int(columns)
        except:
            columns = def_columns
            rows = def_rows
        if columns > def_columns:
            columns = def_columns
for graph in graphs:
try:
gnuplot = subprocess.Popen(["/usr/bin/gnuplot"],
stdin=subprocess.PIPE)
except Exception as e:
                raise Exception("Error launching gnuplot: {0}".format(e))
gnuplot.stdin.write("set term dumb {0} {1}\n".format(
columns, rows))
gnuplot.stdin.write("set xdata time\n")
gnuplot.stdin.write('set xlabel "Time"\n')
gnuplot.stdin.write('set timefmt \"%Y-%m-%d %H:%M\"\n')
gnuplot.stdin.write('set xrange [\"%s\":\"%s\"]\n' %
(ascii_date(timestamps[0]),
ascii_date(timestamps[-1])))
gnuplot.stdin.write('set ylabel "%s"\n' % (graph))
gnuplot.stdin.write('set datafile separator ","\n')
gnuplot.stdin.write('set autoscale y\n')
gnuplot.stdin.write('set title "%s - %s"\n' %
(graph, " ".join(sar_parser._files)))
# FIXME: do it through a method
try:
dataset = [sar_parser._data[d][graph] for d in timestamps]
except KeyError:
print("Key '{0}' could not be found")
return
txt = "plot '-' using 1:2 title '{0}' with linespoints \n".format(
graph)
gnuplot.stdin.write(txt)
for i, j in zip(timestamps, dataset):
s = '\"%s\",%f\n' % (ascii_date(i), j)
gnuplot.stdin.write(s)
gnuplot.stdin.write("e\n")
gnuplot.stdin.write("exit\n")
gnuplot.stdin.flush()
def export_csv(self):
return
def close(self):
"""Removes temporary directory and files"""
if os.path.isdir(self._tempdir):
shutil.rmtree(self._tempdir)
| mbaldessari/sarstats | sar_grapher.py | Python | gpl-2.0 | 10,858 |
#----------------------------------------------------------------------
# Copyright (c) 2015 Inria by David Margery
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
An aggregate manager delegate that raises an APIErrorException to test
behavior in gcf.geni.am.am3.AggregateManager when calling a delegate
that raises an exception.
"""
import gcf.geni.am.am3 as am3
class ExceptionRaiserDelegate(am3.ReferenceAggregateManager):
def __init__(self, root_cert, urn_authority, url, **kwargs):
super(ExceptionRaiserDelegate,self).__init__(root_cert,urn_authority,url,**kwargs)
def Shutdown(self, slice_urn, credentials, options):
raise am3.ApiErrorException(am3.AM_API.REFUSED, "test exception")
| tcmitchell/geni-tools | src/gcf/geni/am/test_ams.py | Python | mit | 1,765 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sitecheck', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ApiClientState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sleep_until', models.DateTimeField(null=True)),
('max_concurrent_assessments', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RequestLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('uuid', models.UUIDField(max_length=32)),
('request_url', models.CharField(max_length=1000)),
('request_headers', models.TextField()),
('request_body', models.TextField(null=True)),
('response_code', models.IntegerField(null=True)),
('response_headers', models.TextField(null=True)),
('response_body', models.TextField(null=True)),
('sitecheck', models.ForeignKey(related_name='requestlogs', to='sitecheck.SiteCheck', null=True, on_delete=models.PROTECT)),
],
options={
'ordering': ['-datetime'],
},
bases=(models.Model,),
),
]
| tykling/tlsscout | src/ssllabs/migrations/0001_initial.py | Python | bsd-3-clause | 1,636 |
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
from . import ffi, librtmp
from .aval import AVal
from .compat import is_py2, bytes, range
from .exceptions import AMFError
AMF_STRING_TYPES = (librtmp.AMF_STRING, librtmp.AMF_LONG_STRING)
AMF_OBJECT_DICT_TYPES = (librtmp.AMF_OBJECT, librtmp.AMF_ECMA_ARRAY)
__all__ = ["AMFObject", "decode_amf", "encode_amf"]
class AMFObject(dict):
pass
def _create_buffer(size):
pbuf = ffi.new("char[]", size)
pend = pbuf + size
buf = ffi.buffer(pbuf, size)
return pbuf, pend, buf
def _encode_key_name(key):
key = bytes(key, "utf8")
key_len = len(key)
pbuf, pend, buf = _create_buffer(key_len + 2)
librtmp.AMF_EncodeInt16(pbuf, pend, key_len)
buf[2:key_len + 2] = key
return buf[:]
@singledispatch
def encoder(val):
raise AMFError("Unable to encode '{0}' type".format(type(val).__name__))
@encoder.register(type(None))
def _encode_none(val):
return bytes((librtmp.AMF_NULL,))
@encoder.register(str)
def _encode_str(val):
val = AVal(val)
pbuf, pend, buf = _create_buffer(val.aval.av_len + 1 + 4)
res = librtmp.AMF_EncodeString(pbuf, pend, val.aval)
size = res - pbuf
return buf[:size]
if is_py2:
encoder.register(unicode, _encode_str)
@encoder.register(float)
@encoder.register(int)
def _encode_number(val):
val = float(val)
pbuf, pend, buf = _create_buffer(9)
librtmp.AMF_EncodeNumber(pbuf, pend, val)
return buf[:]
if is_py2:
encoder.register(long, _encode_number)
@encoder.register(bool)
def _encode_boolean(val):
pbuf, pend, buf = _create_buffer(2)
librtmp.AMF_EncodeBoolean(pbuf, pend, int(val))
return buf[:]
@encoder.register(AMFObject)
def _encode_object(val):
phead, headend, head = _create_buffer(4)
head[0] = bytes((librtmp.AMF_OBJECT,))
librtmp.AMF_EncodeInt24(phead + 1, headend, librtmp.AMF_OBJECT_END)
body = bytearray()
for key, value in val.items():
body += _encode_key_name(key)
body += encoder(value)
return head[:1] + bytes(body) + head[1:]
@encoder.register(dict)
def _encode_ecma_array(val):
phead, headend, head = _create_buffer(8)
head[0] = bytes((librtmp.AMF_ECMA_ARRAY,))
librtmp.AMF_EncodeInt32(phead + 1, headend, len(val))
librtmp.AMF_EncodeInt24(phead + 5, headend, librtmp.AMF_OBJECT_END)
body = bytearray()
for key, value in val.items():
body += _encode_key_name(key)
body += encoder(value)
return head[:5] + bytes(body) + head[5:]
@encoder.register(list)
def _encode_array(val):
phead, headend, head = _create_buffer(5)
head[0] = bytes((librtmp.AMF_STRICT_ARRAY,))
librtmp.AMF_EncodeInt32(phead + 1, headend, len(val))
body = bytearray()
for value in val:
body += encoder(value)
return head[:] + bytes(body)
def _decode_prop(prop):
prop_type = librtmp.AMFProp_GetType(prop)
if prop_type == librtmp.AMF_NUMBER:
val = librtmp.AMFProp_GetNumber(prop)
elif prop_type in AMF_STRING_TYPES:
aval = AVal()
librtmp.AMFProp_GetString(prop, aval.aval)
val = aval.value.decode("utf8", "ignore")
elif prop_type == librtmp.AMF_BOOLEAN:
val = bool(librtmp.AMFProp_GetBoolean(prop))
elif prop_type in AMF_OBJECT_DICT_TYPES:
if prop_type == librtmp.AMF_OBJECT:
val = AMFObject()
else:
val = dict()
for key, value in _decode_prop_obj(prop):
val[key] = value
elif prop_type == librtmp.AMF_STRICT_ARRAY:
val = []
for key, value in _decode_prop_obj(prop):
val.append(value)
else:
val = None
return val
def _decode_prop_obj(prop):
obj = ffi.new("AMFObject*")
librtmp.AMFProp_GetObject(prop, obj)
prop_count = librtmp.AMF_CountProp(obj)
for i in range(prop_count):
prop = librtmp.AMF_GetProp(obj, ffi.NULL, i)
key = AVal()
librtmp.AMFProp_GetName(prop, key.aval)
key = key.value.decode("utf8", "ignore")
value = _decode_prop(prop)
yield key, value
def encode_amf(value):
return encoder(value)
def decode_amf(body):
obj = ffi.new("AMFObject*")
res = librtmp.AMF_Decode(obj, body, len(body), 0)
if res == ffi.NULL:
raise AMFError("Unable to decode AMF data")
rval = []
prop_count = librtmp.AMF_CountProp(obj)
for i in range(prop_count):
prop = librtmp.AMF_GetProp(obj, ffi.NULL, i)
val = _decode_prop(prop)
rval.append(val)
return rval
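# Hedged usage sketch (requires the librtmp cffi bindings at runtime; the values
# below are illustrative and are not taken from this repository's tests):
#
#   payload = encode_amf("connect") + encode_amf(1.0) + encode_amf({"app": "live"})
#   decode_amf(payload)  # -> ["connect", 1.0, {"app": "live"}]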
| Autotonic/piny-librtmp | rtmp/librtmp/amf.py | Python | mit | 4,625 |
__source__ = 'https://leetcode.com/problems/island-perimeter/'
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 463. Island Perimeter
# Thought: what if there's a lake?
# Lake: [[0,1,0,0],[1,0,1,0],[0,1,0,0],[1,1,0,0]]
# No Lake: [[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]
# -> both work
# You are given a map in form of a two-dimensional integer grid where 1 represents land and 0 represents water.
# Grid cells are connected horizontally/vertically (not diagonally). The grid is completely surrounded by water,
# and there is exactly one island (i.e., one or more connected land cells). The island doesn't have "lakes"
# (water inside that isn't connected to the water around the island). One cell is a square with side length 1.
# The grid is rectangular, width and height don't exceed 100. Determine the perimeter of the island.
#
# Example:
#
# [[0,1,0,0],
# [1,1,1,0],
# [0,1,0,0],
# [1,1,0,0]]
#
# Answer: 16
# Explanation: The perimeter is the 16 yellow stripes in the image below:
#
# Companies
# Google
# Related Topics
# Hash Table
#
import unittest
import operator
class Solution(object):
# Since there are no lakes, every pair of neighbour cells with different values is part of the perimeter
# (more precisely, the edge between them is).
# So just count the differing pairs, both horizontally and vertically
# (for the latter I simply transpose the grid).
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
return sum(sum(map(operator.ne, [0] + row, row + [0]))
for row in grid + map(list, zip(*grid)))
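# Hedged worked example of the transition-counting idea above (the grid and the
# expected answer come from the problem statement; the per-row/per-column counts
# are worked out here purely for illustration):
#   grid = [[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]
#   each row, padded with a 0 on both ends, contains 2 land/water transitions
#   (4 rows -> 8 edges), and the transposed columns contribute 4 + 2 + 2 + 0 = 8
#   more, so the perimeter is 8 + 8 = 16, matching the expected answer of 16.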
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
        print Solution().islandPerimeter([[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]])
if __name__ == '__main__':
unittest.main()
Java = '''
Thought:
# Brute force, but do we need to do flood fill?
# 114ms 62.73%
class Solution {
public int islandPerimeter(int[][] grid) {
if (grid == null || grid.length == 0 || grid[0].length == 0) return 0;
int row = grid.length;
int col = grid[0].length;
int sum = 0;
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
if (grid[i][j] == 1) {
int k = 0;
if (i > 0 && grid[i - 1][j] == 1) k++;
if (j > 0 && grid[i][j - 1] == 1) k++;
if (i + 1 < row && grid[i + 1][j] == 1) k++;
if (j + 1 < col && grid[i][j + 1] == 1) k++;
sum += (4 - k);
}
}
}
return sum;
}
}
# Improve, do not re-visit
add 4 for each land and remove 2 for each neighbor
Thought: loop over the matrix and count the number of islands;
if the current dot is an island, count if it has any right neighbour or down neighbour;
the result is islands * 4 - neighbours * 2
# 78ms 45.56%
class Solution {
public int islandPerimeter(int[][] grid) {
int islands = 0, neighbors = 0;
for (int i = 0; i < grid.length; i++) {
for (int j = 0; j < grid[i].length; j++) {
if (grid[i][j] == 1) {
islands++;
if ( i < grid.length - 1 && grid[i+1][j] == 1) neighbors++;
if (j < grid[i].length - 1 && grid[i][j + 1] == 1) neighbors++;
}
}
}
return islands * 4 - neighbors * 2;
}
}
# 53ms 98.72%
class Solution {
public int islandPerimeter(int[][] grid) {
if (grid == null || grid.length == 0 || grid[0].length == 0) return 0;
int res = 0;
for (int i = 0; i < grid.length; i++) {
for (int j = 0; j < grid[0].length; j++) {
if (grid[i][j] == 1) {
res += 4;
if (i > 0 && grid[i - 1][j] == 1) res -= 2;
if (j > 0 && grid[i][j-1] == 1) res -= 2;
}
}
}
return res;
}
}
# DFS:
# 132ms 18.36%
class Solution {
public int islandPerimeter(int[][] grid) {
if (grid == null) return 0;
for (int i = 0 ; i < grid.length ; i++){
for (int j = 0 ; j < grid[0].length ; j++){
if (grid[i][j] == 1) {
return getPerimeter(grid,i,j); // if not lake,
// count += getPerimeter(grid,i,j); // and return count at the end if with lake
}
}
}
return 0;
}
public int getPerimeter(int[][] grid, int i, int j){
if (i < 0 || i >= grid.length || j < 0 || j >= grid[0].length || grid[i][j] == 0) {return 1;}
if (grid[i][j] == -1) return 0;
int count = 0;
grid[i][j] = -1;
count += getPerimeter(grid, i-1, j);
count += getPerimeter(grid, i, j-1);
count += getPerimeter(grid, i, j+1);
count += getPerimeter(grid, i+1, j);
return count;
}
}
# 124ms 32.91%
class Solution {
// the idea is traverse the whole grid, automatically +=4 every time we see a 1,
// check this point's surroundings, if the surrounding is not of of boundary and it
// is a 1, means, also a land, then -= 1
private static final int[][] dirs = {{0, 1}, {0, -1}, {1, 0}, {-1, 0}};
public int islandPerimeter(int[][] grid) {
int res = 0;
for(int i = 0; i < grid.length; i++){
for(int j = 0; j < grid[0].length; j++){
if(grid[i][j] == 1){
res += 4;
for(int[] dir : dirs){
int x = dir[0] + i;
int y = dir[1] + j;
if(x < 0 || y < 0 || x >= grid.length || y >= grid[0].length || grid[x][y] == 0) continue;
res -= 1;
}
}
}
}
return res;
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/IslandPerimeter.py | Python | apache-2.0 | 5,998 |
#! /usr/bin/env python
from zplot import *
t = table('verticalintervals.data')
canvas = pdf('verticalintervals.pdf')
d = drawable(canvas, coord=[50,30],
xrange=[0,t.getmax('nodes')], yrange=[0,900])
axis(d, xtitle='Nodes', xauto=[0,t.getmax('nodes'),1],
ytitle='Throughput (MB)', yauto=[0,900,300])
# ylofield and yhifield specify the interval range
p = plotter()
p.verticalintervals(d, t, xfield='nodes', ylofield='min', yhifield='max')
canvas.render()
| z-plot/z-plot | examples/basics-pdf/verticalintervals.py | Python | bsd-3-clause | 478 |
import logging
from typing import List
from lxml.builder import E
from .e2b_resources import build_messageheader, build_safetyreport, print_root
from .models.e2b import TrackedEntity
log = logging.getLogger(__name__)
def run(
tracked_entities: List[TrackedEntity],
*,
sender_id: str,
receiver_id: str,
country: str,
receiverorganization: str,
receivercountrycode: str,
):
root = E.ichicsr(lang="en")
build_messageheader(root, sender_id, receiver_id)
for te in tracked_entities:
build_safetyreport(
root,
te,
te.enrollments[0],
country,
receiverorganization,
receivercountrycode,
)
print_root(root)
| dhis2/dhis2-python | dhis2_core/src/dhis2/e2b/r2.py | Python | bsd-3-clause | 737 |
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.mlnx import agent_notify_api
from neutron.plugins.mlnx.common import constants
from neutron.plugins.mlnx.db import mlnx_db_v2 as db
from neutron.plugins.mlnx import rpc_callbacks
LOG = logging.getLogger(__name__)
class MellanoxEswitchPlugin(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin):
"""Realization of Neutron API on Mellanox HCA embedded switch technology.
Current plugin provides embedded HCA Switch connectivity.
Code is based on the Linux Bridge plugin content to
support consistency with L3 & DHCP Agents.
A new VLAN is created for each network. An agent is relied upon
to perform the actual HCA configuration on each host.
The provider extension is also supported.
    The port binding extension enables an external application to relay
    information to and from the plugin.
"""
# This attribute specifies whether the plugin supports or not
# bulk operations. Name mangling is used in order to ensure it
# is qualified by class
__native_bulk_support = True
_supported_extension_aliases = ["provider", "external-net", "router",
"ext-gw-mode", "binding", "quotas",
"security-group", "agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
"""Start Mellanox Neutron Plugin."""
super(MellanoxEswitchPlugin, self).__init__()
self._parse_network_config()
db.sync_network_states(self.network_vlan_ranges)
self._set_tenant_network_type()
self.vnic_type = cfg.CONF.ESWITCH.vnic_type
self.base_binding_dict = {
portbindings.VIF_TYPE: self.vnic_type,
portbindings.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
self._setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
LOG.debug(_("Mellanox Embedded Switch Plugin initialisation complete"))
def _setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [rpc_callbacks.MlnxRpcCallbacks(),
agents_db.AgentExtRpcCallback()]
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
self.notifier = agent_notify_api.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotifyAPI()
)
def _parse_network_config(self):
self._parse_physical_network_types()
self._parse_network_vlan_ranges()
for network in self.network_vlan_ranges.keys():
if not self.phys_network_type_maps.get(network):
self.phys_network_type_maps[network] = self.physical_net_type
def _parse_physical_network_types(self):
"""Parse physical network types configuration.
Verify default physical network type is valid.
Parse physical network mappings.
"""
self.physical_net_type = cfg.CONF.MLNX.physical_network_type
if self.physical_net_type not in (constants.TYPE_ETH,
constants.TYPE_IB):
LOG.error(_("Invalid physical network type %(type)s."
"Server terminated!"), {'type': self.physical_net_type})
raise SystemExit(1)
try:
self.phys_network_type_maps = utils.parse_mappings(
cfg.CONF.MLNX.physical_network_type_mappings)
except ValueError as e:
LOG.error(_("Parsing physical_network_type failed: %s."
" Server terminated!"), e)
raise SystemExit(1)
for network, type in self.phys_network_type_maps.iteritems():
if type not in (constants.TYPE_ETH, constants.TYPE_IB):
LOG.error(_("Invalid physical network type %(type)s "
" for network %(net)s. Server terminated!"),
{'net': network, 'type': type})
raise SystemExit(1)
LOG.info(_("Physical Network type mappings: %s"),
self.phys_network_type_maps)
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.MLNX.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Server terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _extend_network_dict_provider(self, context, network):
binding = db.get_network_binding(context.session, network['id'])
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type == svc_constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == svc_constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
else:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
def _set_tenant_network_type(self):
self.tenant_network_type = cfg.CONF.MLNX.tenant_network_type
if self.tenant_network_type not in [svc_constants.TYPE_VLAN,
svc_constants.TYPE_LOCAL,
svc_constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Service terminated!"),
self.tenant_network_type)
sys.exit(1)
def _process_provider_create(self, context, attrs):
network_type = attrs.get(provider.NETWORK_TYPE)
physical_network = attrs.get(provider.PHYSICAL_NETWORK)
segmentation_id = attrs.get(provider.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return (None, None, None)
if not network_type_set:
msg = _("provider:network_type required")
raise n_exc.InvalidInput(error_message=msg)
elif network_type == svc_constants.TYPE_FLAT:
self._process_flat_net(segmentation_id_set)
segmentation_id = constants.FLAT_VLAN_ID
elif network_type == svc_constants.TYPE_VLAN:
self._process_vlan_net(segmentation_id, segmentation_id_set)
elif network_type == svc_constants.TYPE_LOCAL:
self._process_local_net(physical_network_set,
segmentation_id_set)
segmentation_id = constants.LOCAL_VLAN_ID
physical_network = None
else:
msg = _("provider:network_type %s not supported") % network_type
raise n_exc.InvalidInput(error_message=msg)
physical_network = self._process_net_type(network_type,
physical_network,
physical_network_set)
return (network_type, physical_network, segmentation_id)
def _process_flat_net(self, segmentation_id_set):
if segmentation_id_set:
msg = _("provider:segmentation_id specified for flat network")
raise n_exc.InvalidInput(error_message=msg)
def _process_vlan_net(self, segmentation_id, segmentation_id_set):
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise n_exc.InvalidInput(error_message=msg)
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("provider:segmentation_id out of range "
"(%(min_id)s through %(max_id)s)") %
{'min_id': q_const.MIN_VLAN_TAG,
'max_id': q_const.MAX_VLAN_TAG})
raise n_exc.InvalidInput(error_message=msg)
def _process_local_net(self, physical_network_set, segmentation_id_set):
if physical_network_set:
msg = _("provider:physical_network specified for local "
"network")
raise n_exc.InvalidInput(error_message=msg)
if segmentation_id_set:
msg = _("provider:segmentation_id specified for local "
"network")
raise n_exc.InvalidInput(error_message=msg)
def _process_net_type(self, network_type,
physical_network,
physical_network_set):
if network_type in [svc_constants.TYPE_VLAN,
svc_constants.TYPE_FLAT]:
if physical_network_set:
if physical_network not in self.network_vlan_ranges:
msg = _("Unknown provider:physical_network "
"%s") % physical_network
raise n_exc.InvalidInput(error_message=msg)
elif 'default' in self.network_vlan_ranges:
physical_network = 'default'
else:
msg = _("provider:physical_network required")
raise n_exc.InvalidInput(error_message=msg)
return physical_network
def _check_port_binding_for_net_type(self, vnic_type, net_type):
"""
VIF_TYPE_DIRECT is valid only for Ethernet fabric
"""
if net_type == constants.TYPE_ETH:
return vnic_type in (constants.VIF_TYPE_DIRECT,
constants.VIF_TYPE_HOSTDEV)
elif net_type == constants.TYPE_IB:
return vnic_type == constants.VIF_TYPE_HOSTDEV
return False
def _process_port_binding_create(self, context, attrs):
binding_profile = attrs.get(portbindings.PROFILE)
binding_profile_set = attributes.is_attr_set(binding_profile)
net_binding = db.get_network_binding(context.session,
attrs.get('network_id'))
phy_net = net_binding.physical_network
if not binding_profile_set:
return self.vnic_type
if constants.VNIC_TYPE in binding_profile:
vnic_type = binding_profile[constants.VNIC_TYPE]
phy_net_type = self.phys_network_type_maps[phy_net]
if vnic_type in (constants.VIF_TYPE_DIRECT,
constants.VIF_TYPE_HOSTDEV):
if self._check_port_binding_for_net_type(vnic_type,
phy_net_type):
self.base_binding_dict[portbindings.VIF_TYPE] = vnic_type
return vnic_type
else:
msg = (_("Unsupported vnic type %(vnic_type)s "
"for physical network type %(net_type)s") %
{'vnic_type': vnic_type, 'net_type': phy_net_type})
else:
msg = _("Invalid vnic_type on port_create")
else:
msg = _("vnic_type is not defined in port profile")
raise n_exc.InvalidInput(error_message=msg)
def create_network(self, context, network):
(network_type, physical_network,
vlan_id) = self._process_provider_create(context,
network['network'])
session = context.session
with session.begin(subtransactions=True):
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == svc_constants.TYPE_NONE:
raise n_exc.TenantNetworksDisabled()
elif network_type == svc_constants.TYPE_VLAN:
physical_network, vlan_id = db.reserve_network(session)
else: # TYPE_LOCAL
vlan_id = constants.LOCAL_VLAN_ID
else:
# provider network
if network_type in [svc_constants.TYPE_VLAN,
svc_constants.TYPE_FLAT]:
db.reserve_specific_network(session,
physical_network,
vlan_id)
net = super(MellanoxEswitchPlugin, self).create_network(context,
network)
db.add_network_binding(session, net['id'],
network_type,
physical_network,
vlan_id)
self._process_l3_create(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# note - exception will rollback entire transaction
LOG.debug(_("Created network: %s"), net['id'])
return net
def update_network(self, context, net_id, network):
LOG.debug(_("Update network"))
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(MellanoxEswitchPlugin, self).update_network(context,
net_id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def delete_network(self, context, net_id):
LOG.debug(_("Delete network"))
session = context.session
with session.begin(subtransactions=True):
binding = db.get_network_binding(session, net_id)
self._process_l3_delete(context, net_id)
super(MellanoxEswitchPlugin, self).delete_network(context,
net_id)
if binding.segmentation_id != constants.LOCAL_VLAN_ID:
db.release_network(session, binding.physical_network,
binding.segmentation_id,
self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, net_id)
def get_network(self, context, net_id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(MellanoxEswitchPlugin, self).get_network(context,
net_id,
None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(MellanoxEswitchPlugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def _extend_port_dict_binding(self, context, port):
port_binding = db.get_port_profile_binding(context.session,
port['id'])
if port_binding:
port[portbindings.VIF_TYPE] = port_binding.vnic_type
binding = db.get_network_binding(context.session,
port['network_id'])
fabric = binding.physical_network
port[portbindings.PROFILE] = {'physical_network': fabric}
return port
def create_port(self, context, port):
LOG.debug(_("create_port with %s"), port)
session = context.session
port_data = port['port']
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
vnic_type = self._process_port_binding_create(context,
port['port'])
port = super(MellanoxEswitchPlugin,
self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data,
port)
db.add_port_profile_binding(context.session, port['id'], vnic_type)
self._process_port_create_security_group(
context, port, sgids)
self.notify_security_groups_member_updated(context, port)
return self._extend_port_dict_binding(context, port)
def get_port(self, context, id, fields=None):
port = super(MellanoxEswitchPlugin, self).get_port(context,
id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
res_ports = []
ports = super(MellanoxEswitchPlugin,
self).get_ports(context, filters, fields, sorts,
limit, marker, page_reverse)
for port in ports:
port = self._extend_port_dict_binding(context, port)
res_ports.append(self._fields(port, fields))
return res_ports
def update_port(self, context, port_id, port):
original_port = self.get_port(context, port_id)
session = context.session
need_port_update_notify = False
with session.begin(subtransactions=True):
updated_port = super(MellanoxEswitchPlugin, self).update_port(
context, port_id, port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify = self.update_security_group_on_port(
context, port_id, port, original_port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
binding = db.get_network_binding(context.session,
updated_port['network_id'])
self.notifier.port_update(context, updated_port,
binding.physical_network,
binding.network_type,
binding.segmentation_id)
return self._extend_port_dict_binding(context, updated_port)
def delete_port(self, context, port_id, l3_port_check=True):
# if needed, check to see if this is a port owned by
# and l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, port_id)
port = self.get_port(context, port_id)
self._delete_port_security_group_bindings(context, port_id)
super(MellanoxEswitchPlugin, self).delete_port(context, port_id)
self.notify_security_groups_member_updated(context, port)
| onecloud/neutron | neutron/plugins/mlnx/mlnx_plugin.py | Python | apache-2.0 | 23,950 |
# -*- coding: utf-8 -*-
# Copyright 2011 Jiří Janoušek <[email protected]>
# Copyright 2014 Jaap Karssenberg <[email protected]>
import gtk
import gobject
from zim.objectmanager import ObjectManager
from zim.gui.widgets import ScrolledTextView, ScrolledWindow, TableVBox
# Constants for grab-focus-cursor and release-focus-cursor
POSITION_BEGIN = 1
POSITION_END = 2
class CustomObjectWidget(gtk.EventBox):
	'''Base class & container for custom object widget
We derive from a C{gtk.EventBox} because we want to re-set the
default cursor for the area of the object widget. For this the
widget needs it's own window for drawing.
Child widgets should be added to the C{vbox} attribute. This attribute
is a L{TableVBox} which draws 1px borders around it's child elements.
@signal: C{link-clicked (link)}: To be emitted when the user clicks a link
@signal: C{link-enter (link)}: To be emitted when the mouse pointer enters a link
@signal: C{link-leave (link)}: To be emitted when the mouse pointer leaves a link
@signal: C{grab-cursor (position)}: emitted when embedded widget
should grab focus, position can be either POSITION_BEGIN or POSITION_END
@signal: C{release-cursor (position)}: emitted when the embedded
widget wants to give back focus to the embedding TextView
'''
# define signals we want to use - (closure type, return type and arg types)
__gsignals__ = {
'link-clicked': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'link-enter': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'link-leave': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'grab-cursor': (gobject.SIGNAL_RUN_LAST, None, (int,)),
'release-cursor': (gobject.SIGNAL_RUN_LAST, None, (int,)),
'size-request': 'override',
}
def __init__(self):
gtk.EventBox.__init__(self)
self.set_border_width(5)
self._has_cursor = False
self.vbox = TableVBox()
self.add(self.vbox)
self._textview_width = -1
def do_realize(self):
gtk.EventBox.do_realize(self)
self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
def on_textview_size_changed(self, textview, width, height):
self._textview_width = width
self.queue_resize()
def do_size_request(self, requisition):
gtk.EventBox.do_size_request(self, requisition)
#~ print "Widget requests: %i textview: %i" % (requisition.width, self._textview_width)
if self._textview_width > requisition.width:
requisition.width = self._textview_width
def has_cursor(self):
'''Returns True if this object has an internal cursor. Will be
used by the TextView to determine if the cursor should go
"into" the object or just jump from the position before to the
position after the object. If True the embedded widget is
expected to support grab_cursor() and use release_cursor().
'''
return self._has_cursor
def set_has_cursor(self, has_cursor):
'''See has_cursor()'''
self._has_cursor = has_cursor
def grab_cursor(self, position):
'''Emits the grab-cursor signal'''
self.emit('grab-cursor', position)
def release_cursor(self, position):
'''Emits the release-cursor signal'''
self.emit('release-cursor', position)
gobject.type_register(CustomObjectWidget)
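# Hedged sketch of a minimal embedder (the class and label below are invented for
# illustration; real widgets follow the same pattern as TextViewWidget underneath):
#
#   class LabelObjectWidget(CustomObjectWidget):
#       def __init__(self, text):
#           CustomObjectWidget.__init__(self)
#           self.vbox.pack_start(gtk.Label(text))  # children go into self.vbox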
class TextViewWidget(CustomObjectWidget):
# TODO make this the base class for the Sourceview plugin
# and ensure the same tricks to integrate in the parent textview
def __init__(self, buffer):
CustomObjectWidget.__init__(self)
self.set_has_cursor(True)
self.buffer = buffer
win, self.view = ScrolledTextView(monospace=True,
hpolicy=gtk.POLICY_AUTOMATIC, vpolicy=gtk.POLICY_NEVER, shadow=gtk.SHADOW_NONE)
self.view.set_buffer(buffer)
self.view.set_editable(True)
self.vbox.pack_start(win)
self._init_signals()
def _init_signals(self):
# Hook up integration with pageview cursor movement
self.view.connect('move-cursor', self.on_move_cursor)
self.connect('parent-set', self.on_parent_set)
self.parent_notify_h = None
def set_editable(self, editable):
self.view.set_editable(editable)
self.view.set_cursor_visible(editable)
def on_parent_set(self, widget, old_parent):
if old_parent and self.parent_notify_h:
old_parent.disconnect(self.parent_notify_h)
self.parent_notify_h = None
parent = self.get_parent()
if parent:
self.set_editable(parent.get_editable())
self.parent_notify_h = parent.connect('notify::editable', self.on_parent_notify)
def on_parent_notify(self, widget, prop, *args):
self.set_editable(self.get_parent().get_editable())
def do_grab_cursor(self, position):
		# Emitted when we are requested to capture the cursor
begin, end = self.buffer.get_bounds()
if position == POSITION_BEGIN:
self.buffer.place_cursor(begin)
else:
self.buffer.place_cursor(end)
self.view.grab_focus()
def on_move_cursor(self, view, step_size, count, extend_selection):
# If you try to move the cursor out of the sourceview
# release the cursor to the parent textview
buffer = view.get_buffer()
iter = buffer.get_iter_at_mark(buffer.get_insert())
if (iter.is_start() or iter.is_end()) \
and not extend_selection:
if iter.is_start() and count < 0:
self.release_cursor(POSITION_BEGIN)
return None
elif iter.is_end() and count > 0:
self.release_cursor(POSITION_END)
return None
return None # let parent handle this signal
class FallbackObjectWidget(TextViewWidget):
def __init__(self, type, buffer):
TextViewWidget.__init__(self, buffer)
#~ self.view.set_editable(False) # object knows best how to manage content
# TODO set background grey ?
plugin = ObjectManager.find_plugin(type) if type else None
if plugin:
self._add_load_plugin_bar(plugin)
else:
label = gtk.Label(_("No plugin is available to display this object.")) # T: Label for object manager
self.vbox.pack_start(label)
def _add_load_plugin_bar(self, plugin):
key, name, activatable, klass, _winextension = plugin
hbox = gtk.HBox(False, 5)
label = gtk.Label(_("Plugin %s is required to display this object.") % name)
# T: Label for object manager
hbox.pack_start(label)
#~ if activatable: # and False:
# Plugin can be enabled
#~ button = gtk.Button(_("Enable plugin")) # T: Label for object manager
#~ def load_plugin(button):
#~ self.ui.plugins.load_plugin(key)
#~ self.ui.reload_page()
#~ button.connect("clicked", load_plugin)
#~ else:
# Plugin has some unresolved dependencies
#~ button = gtk.Button(_("Show plugin details")) # T: Label for object manager
#~ def plugin_info(button):
#~ from zim.gui.preferencesdialog import PreferencesDialog
#~ dialog = PreferencesDialog(self.ui, "Plugins", select_plugin=name)
#~ dialog.run()
#~ self.ui.reload_page()
#~ button.connect("clicked", plugin_info)
#~ hbox.pack_start(button)
self.vbox.pack_start(hbox)
self.vbox.reorder_child(hbox, 0)
# TODO: undo(), redo() stuff
| Osndok/zim-desktop-wiki | zim/gui/objectmanager.py | Python | gpl-2.0 | 6,852 |
import unittest
from simple_dargparse_test import SimpleDargparseTest
###############################################################################
all_suites = [
unittest.TestLoader().loadTestsFromTestCase(SimpleDargparseTest)
]
###############################################################################
# booty
###############################################################################
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(all_suites))
| cwahbong/dargparse | dargparse/tests/test_suite.py | Python | mit | 511 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python :mod:`logging` handlers for Stackdriver Logging."""
import logging
from google.cloud.logging.handlers.transports import BackgroundThreadTransport
from google.cloud.logging.logger import _GLOBAL_RESOURCE
DEFAULT_LOGGER_NAME = 'python'
EXCLUDED_LOGGER_DEFAULTS = (
'google.cloud',
'google.auth',
'google_auth_httplib2',
)
class CloudLoggingHandler(logging.StreamHandler):
"""Handler that directly makes Stackdriver logging API calls.
This is a Python standard ``logging`` handler using that can be used to
route Python standard logging messages directly to the Stackdriver
Logging API.
This handler is used when not in GAE or GKE environment.
This handler supports both an asynchronous and synchronous transport.
:type client: :class:`google.cloud.logging.client`
:param client: the authenticated Google Cloud Logging client for this
handler to use
:type name: str
:param name: the name of the custom log in Stackdriver Logging. Defaults
to 'python'. The name of the Python logger will be represented
in the ``python_logger`` field.
:type transport: :class:`type`
:param transport: Class for creating new transport objects. It should
extend from the base :class:`.Transport` type and
implement :meth`.Transport.send`. Defaults to
:class:`.BackgroundThreadTransport`. The other
option is :class:`.SyncTransport`.
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry, defaults
to the global resource type.
:type labels: dict
:param labels: (Optional) Mapping of labels for the entry.
Example:
.. code-block:: python
import logging
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(client)
cloud_logger = logging.getLogger('cloudLogger')
cloud_logger.setLevel(logging.INFO)
cloud_logger.addHandler(handler)
cloud_logger.error('bad news') # API call
"""
def __init__(self, client,
name=DEFAULT_LOGGER_NAME,
transport=BackgroundThreadTransport,
resource=_GLOBAL_RESOURCE,
labels=None):
super(CloudLoggingHandler, self).__init__()
self.name = name
self.client = client
self.transport = transport(client, name)
self.resource = resource
self.labels = labels
def emit(self, record):
"""Actually log the specified logging record.
Overrides the default emit behavior of ``StreamHandler``.
See https://docs.python.org/2/library/logging.html#handler-objects
:type record: :class:`logging.LogRecord`
:param record: The record to be logged.
"""
message = super(CloudLoggingHandler, self).format(record)
self.transport.send(
record,
message,
resource=self.resource,
labels=self.labels)
def setup_logging(handler, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS,
log_level=logging.INFO):
"""Attach a logging handler to the Python root logger
Excludes loggers that this library itself uses to avoid
infinite recursion.
:type handler: :class:`logging.handler`
:param handler: the handler to attach to the global handler
:type excluded_loggers: tuple
:param excluded_loggers: (Optional) The loggers to not attach the handler
to. This will always include the loggers in the
path of the logging client itself.
:type log_level: int
:param log_level: (Optional) Python logging log level. Defaults to
:const:`logging.INFO`.
Example:
.. code-block:: python
import logging
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(client)
google.cloud.logging.handlers.setup_logging(handler)
logging.getLogger().setLevel(logging.DEBUG)
logging.error('bad news') # API call
"""
all_excluded_loggers = set(excluded_loggers + EXCLUDED_LOGGER_DEFAULTS)
logger = logging.getLogger()
logger.setLevel(log_level)
logger.addHandler(handler)
logger.addHandler(logging.StreamHandler())
for logger_name in all_excluded_loggers:
logger = logging.getLogger(logger_name)
logger.propagate = False
logger.addHandler(logging.StreamHandler())
| tseaver/gcloud-python | logging/google/cloud/logging/handlers/handlers.py | Python | apache-2.0 | 5,412 |
class Actor():
def __init__(self, assumptions, noiseGenerator, uniformNoiseGenerator):
self.assumptions = assumptions
self.noiseGenerator = noiseGenerator
self.uniformNoiseGenerator = uniformNoiseGenerator
class Fundamentalist(Actor):
def price(self, data):
return data['currentPrice'] + \
(self.assumptions['sensitivity'] * (self.assumptions['correctValue'] - data['currentPrice'])) + \
self.noiseGenerator()
class Chartist(Actor):
def price(self, data):
average = data['aggregate'] / self.assumptions['pastSteps']
return data['currentPrice'] + \
(self.assumptions['sensitivity'] * ( data['currentPrice'] - average )) + \
self.noiseGenerator()
class Random(Actor):
def price(self, data):
return self.uniformNoiseGenerator() * data['currentPrice']
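# Hedged usage sketch: the assumption values and noise generators below are
# illustrative stand-ins, not part of the CFP simulation itself.
if __name__ == '__main__':
    import random
    fundamentalist = Fundamentalist(
        {'sensitivity': 0.2, 'correctValue': 100.0},
        lambda: random.gauss(0, 1),        # additive gaussian noise
        lambda: random.uniform(0.9, 1.1))  # multiplicative uniform noise
    chartist = Chartist(
        {'sensitivity': 0.1, 'pastSteps': 5},
        lambda: random.gauss(0, 1),
        lambda: random.uniform(0.9, 1.1))
    data = {'currentPrice': 90.0, 'aggregate': 440.0}
    print(fundamentalist.price(data))  # drifts toward correctValue, plus noise
    print(chartist.price(data))        # follows the deviation from the recent average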
| jfarid27/CFP-Simplified | simulation/CFP/Actor.py | Python | gpl-3.0 | 879 |
from math import sqrt
def cp437(string):
"""Converts utf8 to codepage 437"""
return ''.join([chr(ord(c.encode('cp437'))) for c in string])
def rect(x, y, width, height):
result = []
for i in range(width):
for j in range(height):
result.append((i + x, j + y))
return result
def is_next_to(e1, e2):
"""Returns true if the given entities are positioned next to each other.
e1: An entity
e2: An entity
Returns True if they are orthogonally adjacent
"""
if not e1 or not e2:
return False
    dx, dy = math.sub(e1.position, e2.position)
    # use absolute offsets so neighbours to the left or above are treated the same
    dx, dy = abs(dx), abs(dy)
if dx > 1:
return False
if dy > 1:
return False
if dx == 1 and dy == 1:
return False
return True
class math(object):
@staticmethod
def add(lhs, rhs):
return lhs[0] + rhs[0], lhs[1] + rhs[1]
@staticmethod
def sub(lhs, rhs):
return lhs[0] - rhs[0], lhs[1] - rhs[1]
@staticmethod
def mul(vector, scalar):
return vector[0] * scalar, vector[1] * scalar
@staticmethod
def distance(lhs, rhs):
d = math.sub(lhs, rhs)
return sqrt(d[0] ** 2 + d[1] ** 2)
@staticmethod
def length(vector):
return math.distance((0, 0), vector)
@staticmethod
def normalize(vector):
length = math.length(vector)
return vector[0] / length, vector[1] / length
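# Hedged, minimal demonstration of the helpers above (the values are made up):
if __name__ == '__main__':
    print(rect(0, 0, 2, 2))               # [(0, 0), (0, 1), (1, 0), (1, 1)]
    print(math.add((1, 2), (3, 4)))       # (4, 6)
    print(math.distance((0, 0), (3, 4)))  # 5.0
    print(math.normalize((3, 4)))         # (0.6, 0.8)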
| JoshuaSkelly/lunch-break-rl | utils.py | Python | mit | 1,404 |
from collections import namedtuple
from django.utils.translation import ugettext as _, pgettext
Variable = namedtuple(
'Variable',
['label', 'multiplier', 'format_value', 'format_transfer', 'metric_label', 'imperial_label']
)
Constraint = namedtuple(
'Constraint',
['label', 'format_value', 'format_range']
)
def convert_to_f(value):
return value * 1.8 + 32
def convert_relative_to_f(value):
return value * 1.8
def convert_to_in(value):
return value / 25.4
def convert_to_feet(value):
return value / 0.3048
def convert_to_miles(value):
return value / 1.60934
def format_temperature_value(value, is_imperial):
return '{:.1f}'.format(round(convert_to_f(value) if is_imperial else value, 1))
def format_relative_temperature_value(value, is_imperial):
return '{:.1f}'.format(round(convert_relative_to_f(value) if is_imperial else value, 1))
def format_temperature_transfer(value, is_imperial):
return '{:.2f}'.format(round(convert_relative_to_f(value) if is_imperial else value, 2))
def format_precip_value(value, is_imperial):
return '{:.1f}'.format(round(convert_to_in(value), 1) if is_imperial else round(value))
def format_whole_value(value, is_imperial):
return str(round(value))
def format_no_units(value, is_imperial):
return '{:.1f}'.format(round(value, 1))
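# Hedged examples of the value formatters above (illustrative inputs only):
#   format_temperature_value(20, True)   # -> '68.0'  (20 °C rendered as °F)
#   format_temperature_value(20, False)  # -> '20.0'
#   format_precip_value(254, True)       # -> '10.0'  (254 mm rendered as inches)
#   format_precip_value(254, False)      # -> '254.0'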
VARIABLE_CONFIG = {
'MAT': Variable(
_('Mean annual temperature'), 10, format_temperature_value, format_temperature_transfer, '°C', '°F'
),
'MWMT': Variable(
_('Mean warmest month temperature'), 10, format_temperature_value, format_temperature_transfer, '°C', '°F'
),
'MCMT': Variable(
_('Mean coldest month temperature'), 10, format_temperature_value, format_temperature_transfer, '°C', '°F'
),
'TD': Variable(
_('Temperature difference between MWMT and MCMT, or continentality'), 10, format_relative_temperature_value,
format_temperature_transfer, '°C', '°F'
),
'MAP': Variable(
_('Mean annual precipitation'), 1, format_precip_value, format_precip_value,
pgettext('mm', "Abbreviation of 'millimeters'"),
pgettext('in', "Abbreviation of 'inches'")
),
'MSP': Variable(
_('Mean summer precipitation, May to September'), 1, format_precip_value, format_precip_value,
pgettext('mm', "Abbreviation of 'millimeters'"),
pgettext('in', "Abbreviation of 'inches'")
),
'AHM': Variable(_('Annual heat-moisture index'), 10, format_whole_value, format_whole_value, '', ''),
'SHM': Variable(_('Summer heat-moisture index'), 10, format_whole_value, format_whole_value, '', ''),
'DD_0': Variable(_('Degree-days below 0°C'), 1, format_whole_value, format_whole_value, 'dd', 'dd'),
'DD5': Variable(_('Degree-days above 5°C'), 1, format_whole_value, format_whole_value, 'dd', 'dd'),
'FFP': Variable(_('Frost-free period'), 1, format_whole_value, format_whole_value, _('days'), _('days')),
'PAS': Variable(
_('Precipitation as snow, August to July'), 1, format_precip_value, format_precip_value,
pgettext('mm', "Abbreviation of 'millimeters'"),
pgettext('in', "Abbreviation of 'inches'")
),
'EMT': Variable(
_('Extreme minimum temperature over 30 years'), 10, format_temperature_value, format_temperature_transfer,
'°C', '°F'
),
'EXT': Variable(
_('Extreme maximum temperature over 30 years'), 10, format_temperature_value, format_temperature_transfer,
'°C', '°F'
),
'Eref': Variable(
_('Hargreaves reference evaporation'), 1, format_precip_value, format_precip_value,
pgettext('mm', "Abbreviation of 'millimeters'"),
pgettext('in', "Abbreviation of 'inches'")
),
'CMD': Variable(
_('Hargreaves climatic moisture deficit'), 1, format_precip_value, format_precip_value,
pgettext('mm', "Abbreviation of 'millimeters'"),
pgettext('in', "Abbreviation of 'inches'")
)
}
TRAIT_CONFIG = {
'FD': Variable(_('Flower Date'), 1, format_temperature_value, format_temperature_value, _('days'), _('days')),
'S': Variable(_('Survival'), 1, format_no_units, format_no_units, '', ''),
'S-atva': Variable(_('Survival'), 1, format_no_units, format_no_units, '', ''),
'PC1': Variable('PC1', 1, format_no_units, format_no_units, '', ''),
'PC2': Variable('PC2', 1, format_no_units, format_no_units, '', ''),
'PC3': Variable('PC3', 1, format_no_units, format_no_units, '', ''),
'HGT': Variable(_('Scaled Height'), 1, format_no_units, format_no_units, '', ''),
'HT': Variable(_('Height'), 1, format_no_units, format_no_units, '', '')
}
def format_elevation_value(config, is_imperial):
elevation = config['point']['elevation']
return (
'{:.1f} {}'.format(convert_to_feet(elevation), pgettext('ft', "Abbreviation of 'feet'"))
if is_imperial else '{:.1f} {}'.format(elevation, pgettext('m', "Abbreviation of 'meters'"))
)
def format_elevation_range(values, is_imperial):
return (
'{:.1f} {}'.format(convert_to_feet(values['range']), pgettext('ft', "Abbreviation of 'feet'"))
if is_imperial else '{:.1f} {}'.format(values['range'], pgettext('m', "Abbreviation of 'meters'"))
)
def format_photoperiod_value(config, is_imperial):
return '{y:.2f}, {x:.2f}'.format(**config['point'])
def format_photoperiod_range(values, is_imperial):
return '{hours:.1f} {hours_label}, {day} {month}'.format(hours_label=_('hours'), **values)
def format_latitude_value(config, is_imperial):
return '{y:.2f} °N'.format(**config['point'])
def format_latitude_range(values, is_imperial):
return '{range:.2f} °'.format(**values)
def format_longitude_value(config, is_imperial):
return '{x:.2f} °E'.format(**config['point'])
def format_longitude_range(values, is_imperial):
return '{range:.2f} °'.format(**values)
def format_distance_range(values, is_imperial):
return (
'{} {}'.format(convert_to_miles(values['range']), pgettext('mi', "Abbreviation of 'miles'"))
if is_imperial else '{} {}'.format(values['range'], pgettext('km', "Abbreviation of 'kilometers'"))
)
CONSTRAINT_CONFIG = {
'elevation': Constraint(_('Elevation'), format_elevation_value, format_elevation_range),
'photoperiod': Constraint(_('Photoperiod'), format_photoperiod_value, format_photoperiod_range),
    'latitude': Constraint(_('Latitude'), format_latitude_value, format_latitude_range),
'longitude': Constraint(_('Longitude'), format_longitude_value, format_longitude_range),
'distance': Constraint(_('Distance'), format_photoperiod_value, format_distance_range),
'shapefile': Constraint('Shapefile', None, None),
'raster': Constraint(_('Raster'), None, None),
}
| consbio/seedsource-core | seedsource_core/django/seedsource/report_config.py | Python | bsd-3-clause | 6,847 |
from django.db import models
class Photo(models.Model):
photo_text = models.CharField(max_length=200)
image = models.ImageField()
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.photo_text
class Comment(models.Model):
photo = models.ForeignKey(Photo)
comment_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.comment_text
| louismerlin/merlin-hub | hub/multim/models.py | Python | mit | 476 |
#!/usr/bin/env python
"""
@package ion.agents.platform.responses
@file ion/agents/platform/responses.py
@author Carlos Rueda
@brief Some constants for responses from platform agents/drivers.
"""
from ion.agents.instrument.common import BaseEnum
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
class NormalResponse(BaseEnum):
PORT_TURNED_ON = 'OK_PORT_TURNED_ON'
PORT_ALREADY_ON = 'OK_PORT_ALREADY_ON'
PORT_TURNED_OFF = 'OK_PORT_TURNED_OFF'
PORT_ALREADY_OFF = 'OK_PORT_ALREADY_OFF'
PORT_SET_OVER_CURRENT = 'OK_PORT_SET_OVER_CURRENT'
class InvalidResponse(BaseEnum):
PLATFORM_ID = 'ERROR_INVALID_PLATFORM_ID'
ATTRIBUTE_ID = 'ERROR_INVALID_ATTRIBUTE_ID'
ATTRIBUTE_VALUE_OUT_OF_RANGE = 'ERROR_ATTRIBUTE_VALUE_OUT_OF_RANGE'
ATTRIBUTE_NOT_WRITABLE = 'ERROR_ATTRIBUTE_NOT_WRITABLE'
PORT_ID = 'ERROR_INVALID_PORT_ID'
EVENT_LISTENER_URL = 'ERROR_INVALID_EVENT_LISTENER_URL'
| ooici/marine-integrations | mi/platform/responses.py | Python | bsd-2-clause | 1,069 |
# -*- coding: utf-8 -*-
from datetime import datetime
from odoo import http, tools, _
from odoo.http import request, route
from odoo.addons.website_sale.controllers.main import WebsiteSale as Base
class WebsiteSale(Base):
    def _get_mandatory_shipping_fields(self):
        return ["name", "floor", "building_id", "city_id", "state_id", "zone_id"]
    def _get_mandatory_billing_fields(self):
        return ["name", "floor", "building_id", "city_id", "state_id", "zone_id"]
    @route()
    def cart(self, **post):
        result = super(WebsiteSale, self).cart(**post)
        result.qcontext["periods"] = request.env['delivery.period'].search([])
        return result
    @http.route(['/shop/delivery_date'], type='json',
                auth="public", methods=['POST'], website=True)
    def delivery_date(self, **post):
        # print '>>>>>>>>> post:', post
        # if post.get('delivery_date') and post.get('delivery_period'):
        if post.get('delivery_period'):
            order = request.website.sale_get_order().sudo()
            redirection = self.checkout_redirection(order)
            if redirection:
                return redirection
            if order and order.id:
                # values = {}
                if post.get('delivery_period'):
                    order.write({'delivery_period': post.get('delivery_period')})
                # p_date = datetime.strptime(post.get('delivery_date'), '%m/%d/%Y')
                # post_date = datetime.strftime(p_date, '%m/%d/%Y')#str(user_date_format))
                # today_date = datetime.strftime(datetime.today(), '%m/%d/%Y')#user_date_format)
                # values.update({
                #     'delivery_date': post.get('delivery_date')
                # })
                # order.write(values)
        return True
    @route()
    def address(self, **kw):
        country = request.website.company_id.country_id
        kw['country_id'] = country.id
        kw['country'] = country.id
        kw['state_id'] = request.env['res.country.state'].sudo().search([('code','=','GZ'),('country_id','=',country.id)]).id
        kw['city'] = 'Giza'
        kw['street'] = 'NA'
        result = super(WebsiteSale, self).address(**kw)
        result.qcontext["cities"] = request.env['res.country.city'].search([])
        result.qcontext["zones"] = []  # request.env['res.country.city.zone'].search([])
        result.qcontext["builds"] = []  # request.env['res.zone.building'].search([])
        result.qcontext["city"] = (result.qcontext.get('city_id') != '') and request.env['res.country.city'].sudo().browse(result.qcontext.get('city_id')).name
        return result
    def _checkout_form_save(self, mode, checkout, all_values):
        Partner = request.env['res.partner']
        for val in all_values:
            if hasattr(Partner, val):
                checkout[val] = all_values[val]
        partner_id = super(WebsiteSale, self)._checkout_form_save(mode, checkout, all_values)
        return partner_id
    @http.route(['/shop/zone_info/<model("res.country.city.zone"):zone>'], type='json',
                auth="public", methods=['POST'], website=True)
    def zone_info(self, zone, **kw):
        return dict(
            builds=[(b.id, b.name) for b in zone.building_ids],
        )
    @http.route(['/shop/city_info/<model("res.country.city"):city>'], type='json',
                auth="public", methods=['POST'], website=True)
    def city_info(self, city, **kw):
        return dict(
            zones=[(z.id, z.name) for z in city.zone_ids],
        )
    @http.route(['/shop/state_info/<model("res.country.state"):state>'], type='json',
                auth="public", methods=['POST'], website=True)
    def state_info(self, state, **kw):
        return dict(
            cities=[(cc.id, cc.name) for cc in state.city_ids],
        )
    @http.route(auth='user')
    def checkout(self, **post):
        # the user is logged in to checkout
        return super(WebsiteSale, self).checkout(**post)
| mohamedhagag/dvit-odoo | dvit_hora_address/controllers/controllers.py | Python | agpl-3.0 | 4,583 |
#!/usr/local/munki/munki-python
from CoreFoundation import CFPreferencesCopyAppValue
from distutils.version import LooseVersion
import logging
import os
import plistlib
import sys
# Stolen from offset's offset
if not os.path.exists(os.path.expanduser('~/Library/Logs')):
os.makedirs(os.path.expanduser('~/Library/Logs'))
log_file = os.path.expanduser('~/Library/Logs/omp.log')
logging.basicConfig(format = '%(asctime)s - %(levelname)s: %(message)s',
datefmt = '%m/%d/%Y %I:%M:%S %p',
level = logging.DEBUG,
filename = log_file)
# Function that moves from old location to new location for a list of items
def trash_old_stuff(trashlist, trashpath, newpath):
if isinstance(trashlist, list):
for old_location in trashlist:
# Get the subfolders needed to be created
path_within_destination = os.path.relpath(old_location, trashpath)
# Create what will be the destination path
new_location = os.path.join(newpath, path_within_destination)
# Make sure all the relevant subfolders exist in the destination
if not os.path.exists(os.path.dirname(new_location)):
os.makedirs(os.path.dirname(new_location))
# Even though we've been double-checking paths all along, let's just make one
# last check
if os.path.exists(old_location) and os.path.isdir(newpath):
os.rename(old_location, new_location)
logging.info('Moving {} to {}\n'.format(old_location, new_location))
else:
logging.error('One of {} or {} does not exist\n'.format(old_location,
new_location))
else:
logging.error('{} is not a valid list\n'.format(trashlist))
# Function that checks paths are writable
def check_folder_writable(checkfolder):
if not os.access(checkfolder, os.W_OK):
logging.error("You don't have access to {}".format(checkfolder))
sys.exit(1)
def get_munkiimport_prefs():
    munkirepo = CFPreferencesCopyAppValue('repo_url', 'com.googlecode.munki.munkiimport')
    if munkirepo:
        # CFPreferencesCopyAppValue returns None when the key is unset, so only strip
        # the file:// prefix when a value actually came back
        munkirepo = munkirepo.replace('file://', '')
if not munkirepo:
logging.error('Cannot determine Munki repo URL. Be sure to run munkiimport --configure')
sys.exit(1)
return munkirepo
# Function that gets protected packages or returns an empty dictionary
def get_protected_packages(prefs):
protected = {}
if 'protected_packages' in prefs:
for package in prefs['protected_packages']:
if package['name'] in protected:
protected[package['name']].append(package['version'])
logging.info('Adding version {} to {} in protected '
'packages.'.format(package['version'], package['name']))
else:
protected[package['name']] = [package['version']]
logging.info('Adding {} version {} to protected '
'packages.'.format(package['name'], package['version']))
return protected
# Function that gets the dump location or returns the default
def get_dump_location(prefs, default_dump):
if 'dump_location' in prefs and os.path.exists(prefs['dump_location']):
dump_location = prefs['dump_location']
logging.info('Will use dump location from the preferences '
'file of {}.'.format(dump_location))
else:
dump_location = default_dump
logging.info('Cannot determine a dump location from {}. Will '
'be dumping to {}.'.format(prefs, default_dump))
return dump_location
# Function that checks if a package and version are protected or not... for some reason,
# putting the two conditions in as one if/then doesn't seem to work
def not_protected_package(name, version, protected):
if name in protected:
if version in protected[name]:
return False
else:
return True
else:
return True
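# Hedged illustration of the check above (hypothetical data): with
# protected = {'Firefox': ['91.0']}, not_protected_package('Firefox', '91.0', protected)
# returns False (the item is kept), while not_protected_package('Firefox', '92.0',
# protected) returns True (the item is eligible for cleanup).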
def get_omp_prefs():
# Where should old packages be moved to? User Trash by default
default_where_to_dump = os.path.expanduser('~/.Trash')
omp_prefs_location = os.path.expanduser('~/Library/Preferences/com.github.aysiu.omp.plist')
if os.path.exists(omp_prefs_location):
try:
f = open(omp_prefs_location, 'r+b')
except:
logging.error('Unable to open {}'.format(omp_prefs_location))
sys.exit(1)
try:
omp_prefs = plistlib.load(f)
except:
logging.error('Unable to get contents of {}'.format(omp_prefs_location))
sys.exit(1)
f.close()
# Call function to get dump location from file
where_to_dump = get_dump_location(omp_prefs, default_where_to_dump)
# Call function to get protected packages
protected_packages = get_protected_packages(omp_prefs)
else:
where_to_dump = default_where_to_dump
logging.info('Cannot determine a dump location from {}. Will be dumping '
'to {}.'.format(omp_prefs_location, where_to_dump))
protected_packages = {}
logging.info('Cannot determine a protected packages list from {}. Not '
'protecting any packages.'.format(omp_prefs_location))
return where_to_dump, protected_packages
# Main
def main():
# Try to get the new Munki path
MUNKI_ROOT_PATH = get_munkiimport_prefs()
# Get OMP prefs or use defaults
where_to_dump, protected_packages = get_omp_prefs()
# Where is make catalogs?
makecatalogs = '/usr/local/munki/makecatalogs'
MUNKI_PKGS_DIR_NAME = 'pkgs'
MUNKI_PKGSINFO_DIR_NAME = 'pkgsinfo'
# Join paths based on what's defined
pkgsinfo_path = os.path.join(MUNKI_ROOT_PATH, MUNKI_PKGSINFO_DIR_NAME)
pkgs_path = os.path.join(MUNKI_ROOT_PATH, MUNKI_PKGS_DIR_NAME)
# Check that the paths for the pkgsinfo and pkgs exist
if not os.path.isdir(pkgsinfo_path) and not os.path.isdir(pkgs_path):
logging.error('Your pkgsinfo and pkgs paths are not valid. Please '
'check your repo_url value')
else:
# Make sure all relevant folders are writable
check_folder_writable(pkgsinfo_path)
check_folder_writable(pkgs_path)
check_folder_writable(where_to_dump)
        # A dict mapping each package name to the newest matching item seen so far
        all_items = {}
        # Lists to store items to delete
        pkgs_to_delete = []
        pkgsinfo_to_delete = []
# Walk through the pkgsinfo files...
for root, dirs, files in os.walk(pkgsinfo_path):
for dir in dirs:
# Skip directories starting with a period
if dir.startswith('.'):
dirs.remove(dir)
for file in files:
# Skip files that start with a period
if file.startswith('.'):
continue
fullfile = os.path.join(root, file)
try:
f = open(fullfile, 'r+b')
except:
logging.error('Unable to open {}'.format(fullfile))
continue
try:
plist = plistlib.load(f)
except:
logging.error('Unable to get contents of {}'.format(fullfile))
continue
plistname = plist['name']
plistversion = plist['version']
# Make sure it's not a protected package
# For some reason, if plistname in protected_packages and plistversion in
# protected_packages[plistname]: won't work combined, so we'll do a
# function test that separates them
if not_protected_package(plistname, plistversion, protected_packages):
# The min OS version key doesn't exist in all pkginfo files
if 'minimum_os_version' in plist:
plistminimum_os_version = plist['minimum_os_version']
else:
plistminimum_os_version = ''
                    try:
                        plistcatalogs = plist['catalogs']
                    except KeyError as err:
                        logging.error('KeyError occurred looking for key {} while checking '
                                      '{}, it does not have a catalog'.format(err, file))
                        # Without a catalogs key there is nothing to compare against,
                        # so skip this pkginfo instead of hitting a NameError below
                        continue
                    plistcatalogs.sort()
# Some items won't have an installer_item_location: nopkg .plist
# files, for example... that's okay
if 'installer_item_location' in plist:
plistinstaller_item_location = os.path.join(pkgs_path,
plist['installer_item_location'])
else:
plistinstaller_item_location = ''
# Create a dictionary based on the plist values read
plistdict = { 'pkginfo': fullfile,
'version': plistversion,
'catalogs': plistcatalogs,
'installer_item_location': plistinstaller_item_location,
'minimum_os_version': plistminimum_os_version }
# See if the plist name is already in all_items
if plistname in all_items:
# Compare the previously existing one to the currently focused
# one to see if they have the same catalogs (fix this because it
# could be testing production or production testing)
if (all_items[plistname]['catalogs'] == plistcatalogs and
all_items[plistname]['minimum_os_version'] ==
plistminimum_os_version):
# See if this is a newer version than the one in there
if (LooseVersion(plistversion) >
LooseVersion(all_items[plistname]['version'])):
# If this is newer, then move the old one to the items to
# delete list
if( all_items[plistname]['installer_item_location'] != '' ):
pkgs_to_delete.append(all_items[plistname]['installer_item_location'])
pkgsinfo_to_delete.append(all_items[plistname]['pkginfo'])
del all_items[plistname]
all_items[plistname] = plistdict
else:
# Otherwise, if this is older, keep the old one in there,
# and move this one to the delete list
if( plistdict['installer_item_location'] != '' ):
pkgs_to_delete.append(plistdict['installer_item_location'])
pkgsinfo_to_delete.append(plistdict['pkginfo'])
else:
# If it's not in the list already, add it
all_items[plistname] = plistdict
else:
logging.info('Keeping {} version {} because it is a protected '
'package.'.format(plistname, plistversion))
if pkgs_to_delete:
trash_old_stuff(pkgs_to_delete, pkgs_path, where_to_dump)
if pkgsinfo_to_delete:
trash_old_stuff(pkgsinfo_to_delete, pkgsinfo_path, where_to_dump)
if pkgs_to_delete or pkgsinfo_to_delete:
# If /usr/local/munki/makecatalogs exists (it should), then run it to reflect
# the changes or let the user know to run it
if os.path.exists(makecatalogs):
logging.info('Running makecatalogs')
os.system(makecatalogs)
else:
logging.error('{} could not be found. When you have a chance, run '
'makecatalogs on your Munki repo to have the changes '
'reflected.'.format(makecatalogs))
else:
logging.info('Nothing old to dump.')
if __name__ == '__main__':
main()
| aysiu/OldMunkiPackages | payload/usr/local/omp/OldMunkiPackages.py | Python | apache-2.0 | 12,313 |
"""
.. todo::
WRITEME
"""
import numpy as N
import copy
from theano import config
import theano.tensor as T
from pylearn2.utils.rng import make_np_rng
class CosDataset(object):
"""
Makes a dataset that streams randomly generated 2D examples.
The first coordinate is sampled from a uniform distribution.
The second coordinate is the cosine of the first coordinate,
plus some gaussian noise.
"""
def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=None):
"""
.. todo::
WRITEME
"""
self.min_x, self.max_x, self.std = min_x, max_x, std
rng = make_np_rng(rng, [17, 2, 946], which_method=['uniform', 'randn'])
self.default_rng = copy.copy(rng)
self.rng = rng
def energy(self, mat):
"""
.. todo::
WRITEME
"""
x = mat[:, 0]
y = mat[:, 1]
rval = (y - N.cos(x)) ** 2. / (2. * (self.std ** 2.))
return rval
def pdf_func(self, mat):
"""
.. todo::
WRITEME properly
This dataset can generate an infinite amount of examples.
This function gives the pdf from which the examples are drawn.
"""
x = mat[:, 0]
y = mat[:, 1]
rval = N.exp(-(y - N.cos(x)) ** 2. / (2. * (self.std ** 2.)))
rval /= N.sqrt(2.0 * N.pi * (self.std ** 2.))
rval /= (self.max_x - self.min_x)
rval *= x < self.max_x
rval *= x > self.min_x
return rval
def free_energy(self, X):
"""
.. todo::
WRITEME properly
This dataset can generate an infinite amount of examples.
This function gives the energy function for the distribution from which the examples are drawn.
"""
x = X[:, 0]
y = X[:, 1]
rval = T.sqr(y - T.cos(x)) / (2. * (self.std ** 2.))
mask = x < self.max_x
mask = mask * (x > self.min_x)
rval = mask * rval + (1 - mask) * 1e30
return rval
def pdf(self, X):
"""
.. todo::
WRITEME properly
This dataset can generate an infinite amount of examples.
This function gives the pdf from which the examples are drawn.
"""
x = X[:, 0]
y = X[:, 1]
rval = T.exp(-T.sqr(y - T.cos(x)) / (2. * (self.std ** 2.)))
rval /= N.sqrt(2.0 * N.pi * (self.std ** 2.))
rval /= (self.max_x - self.min_x)
rval *= x < self.max_x
rval *= x > self.min_x
return rval
def get_stream_position(self):
"""
.. todo::
WRITEME
"""
return copy.copy(self.rng)
def set_stream_position(self, s):
"""
.. todo::
WRITEME
"""
self.rng = copy.copy(s)
def restart_stream(self):
"""
.. todo::
WRITEME
"""
self.reset_RNG()
def reset_RNG(self):
"""
.. todo::
WRITEME
"""
if 'default_rng' not in dir(self):
self.default_rng = N.random.RandomState([17, 2, 946])
self.rng = copy.copy(self.default_rng)
def apply_preprocessor(self, preprocessor, can_fit=False):
"""
.. todo::
WRITEME
"""
raise NotImplementedError()
def get_batch_design(self, batch_size):
"""
.. todo::
WRITEME
"""
x = N.cast[config.floatX](self.rng.uniform(self.min_x, self.max_x,
(batch_size, 1)))
y = N.cos(x) + (N.cast[config.floatX](self.rng.randn(*x.shape)) *
self.std)
rval = N.hstack((x, y))
return rval
| skearnes/pylearn2 | pylearn2/datasets/cos_dataset.py | Python | bsd-3-clause | 3,767 |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.tests import fake_network
from nova.tests.integrated.api import client
from nova.tests.integrated import integrated_helpers
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
_force_delete_parameter = 'forceDelete'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
_access_ipv4_parameter = 'accessIPv4'
_access_ipv6_parameter = 'accessIPv6'
_return_resv_id_parameter = 'return_reservation_id'
_min_count_parameter = 'min_count'
def setUp(self):
super(ServersTest, self).setUp()
self.conductor = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in xrange(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _restart_compute_service(self, *args, **kwargs):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
self.compute = self.start_service('compute', *args, **kwargs)
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*args, **kwargs):
raise exception.BuildAbortException(reason='',
instance_uuid='fake')
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ERROR', found_server['status'])
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
# Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server[self._image_ref_parameter] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server[self._image_ref_parameter] = good_server.get(
self._image_ref_parameter)
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server[self._flavor_ref_parameter] = good_server.get(
self._flavor_ref_parameter)
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertIn("image", server)
self.assertIn("flavor", server)
self._delete_server(created_server_id)
def _force_reclaim(self):
# Make sure that compute manager thinks the instance is
# old enough to be expired
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Cannot forceDelete unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'forceDelete': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
self._force_reclaim()
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'DELETED')
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Force delete server
self.api.post_server_action(created_server_id,
{self._force_delete_parameter: {}})
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
found_server = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
#if found_server['status'] != 'deleting':
# break
time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
def _delete_server(self, server_id):
# Delete the server
self.api.delete_server(server_id)
self._wait_for_deletion(server_id)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2",
"metadata": {'some': 'thing'},
}
post['rebuild'].update(self._get_access_ips_params())
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# Cleanup
self._delete_server(created_server_id)
def _get_access_ips_params(self):
return {self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2"}
def _verify_access_ips(self, server):
self.assertEqual('172.19.0.2',
server[self._access_ipv4_parameter])
self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
def test_rename_server(self):
# Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map.iterkeys():
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
fake_network.set_stub_network_methods(self.stubs)
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': data.encode('base64'),
})
# Inject a binary file
data = zlib.compress('Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': data.encode('base64'),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
_force_delete_parameter = 'force_delete'
_api_version = 'v3'
_image_ref_parameter = 'image_ref'
_flavor_ref_parameter = 'flavor_ref'
_return_resv_id_parameter = 'os-multiple-create:return_reservation_id'
_min_count_parameter = 'os-multiple-create:min_count'
_access_ipv4_parameter = None
_access_ipv6_parameter = None
def _get_access_ips_params(self):
return {}
def _verify_access_ips(self, server):
# NOTE(alexxu): access_ips was demoted as extensions in v3 api.
# So skips verifying access_ips
pass
| afrolov1/nova | nova/tests/integrated/test_servers.py | Python | apache-2.0 | 20,095 |
from .conll import ConllTree
from copy import deepcopy
del_roles = ["aux", "auxpass", "punct", "det", "predet", "cc", "quantmod", "tmod", "prep", "prt"]
attr_roles = ["arg", "acop", "mod", "amod", "nn", "neg", "expl", "poss", "possessive", "attr", "cop"]
append_roles = ["appos", "num", "number", "ref", "sdep"]
coord_roles = ["advcl", "comp", "acomp", "ccomp", "xcomp", "pcomp", "partmod", "advmod", "infmod", "mwe", "11mark",
"rcmod", "npadvmod", "parataxis"]
i_roles = ["agent", "subj", "nsubj", "nsubjpass", "csubj", "csubjpass", "xsubj"]
ii_roles = ["obj", "dobj", "iobj", "pobj"]
conj_roles = ["conj", "preconj"]
def reduce_tree(tree: ConllTree):
tree_copy = deepcopy(tree)
for i in tree_copy:
if i.value.deprel in del_roles:
i.remove()
for i in tree_copy:
value = i.value
if value.deprel in attr_roles:
value.deprel = "ATTR"
elif value.deprel in append_roles:
value.deprel = "APPEND"
elif value.deprel in coord_roles:
value.deprel = "COORD"
elif value.deprel in i_roles:
value.deprel = "I"
elif value.deprel in ii_roles:
value.deprel = "II"
elif value.deprel in conj_roles:
value.deprel = "CONJ"
return tree_copy | greedyrook/pypatent | pypatent/parser/reduce.py | Python | mit | 1,304 |
"""engine.SCons.Tool.sunar
Tool-specific initialization for Solaris (Forte) ar (library archive). If CC
exists, static libraries should be built with it, so that template
instantiations can be resolved.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunar.py 2013/03/03 09:48:35 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
if env.Detect('CC'):
env['AR'] = 'CC'
env['ARFLAGS'] = SCons.Util.CLVar('-xar')
env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
else:
env['AR'] = 'ar'
env['ARFLAGS'] = SCons.Util.CLVar('r')
env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
def exists(env):
return env.Detect('CC') or env.Detect('ar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| aubreyrjones/libesp | scons_local/scons-local-2.3.0/SCons/Tool/sunar.py | Python | mit | 2,571 |
from module import Klass
from math import pi as PI
from matrix import MAT1, MAT2
def class_decorator(cls):
cls.__call__ = lambda self: print('Cabbage!')
return cls
@class_decorator
class Class(Klass):
@property
def property(self):
temp, ellipsis = self._property
return {temp} if temp%0x12f2 else set()
@property.setter
def property(self, value):
try:
temp = value//0o123
except TypeError:
temp = 1.
def do_something(m):
nonlocal temp
return temp, ...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
print("I'm", "alive!", sep='\n')
async def func(self : 'Class',
                   callback : 'callable',
domain : [0b00, PI],
opt : bool=True) -> None:
""" doc string """
self._property = await callback(MAT1 @ MAT2)
r'''(?<!\d+)
\s*?\W # multiline regex comment
\d$'''
x, y, z, path = f(r'[[]'), R'[]]', r'[^a-zA-Z_]', r'C:\Users\Wilson\new'
f'{self.method(x, y, z, {"a": 97})!a:>>12} => {u:0>16b} {{{v!r}}}'
if __name__ == '__main__':
c = Class()
c.func(.12)
c.property = 0b1011101110
| petervaro/python | tests/Python3.py | Python | gpl-3.0 | 1,285 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" ModelSpeciesForm.py
This file ...
"""
from libsignetsim import ModelException
from .ModelParentForm import ModelParentForm
from signetsim.managers.models import renameSbmlIdInModelDependencies
class ModelSpeciesForm(ModelParentForm):
def __init__(self, parent):
ModelParentForm.__init__(self, parent)
self.name = None
self.sbmlId = None
self.value = None
self.constant = None
self.boundaryCondition = False
self.compartment = None
self.unit = None
self.notes = None
self.isConcentration = None
self.SBOTerm = None
def save(self, species):
try:
if self.compartment is not None:
species.setCompartment(self.parent.listOfCompartments[self.compartment])
else:
species.setCompartment(None)
species.setName(self.name)
if species.getSbmlId() != self.sbmlId:
renameSbmlIdInModelDependencies(self.parent.getSbmlModel(), species.getSbmlId(), self.sbmlId)
species.setSbmlId(self.sbmlId)
species.setValue(self.value)
species.constant = self.constant
species.hasOnlySubstanceUnits = not self.isConcentration
species.boundaryCondition = self.boundaryCondition
if self.unit is not None:
species.setUnits(self.parent.listOfUnits[self.unit])
else:
species.setUnits(None)
species.setNotes(self.notes)
species.getAnnotation().setSBOTerm(self.SBOTerm)
except ModelException as e:
self.addError(e.message)
def read(self, request):
self.id = self.readInt(request, 'species_id',
"The indice of the species",
required=False)
self.name = self.readASCIIString(request, 'species_name',
"The name of the species", required=False)
self.sbmlId = self.readASCIIString(request, 'species_sbml_id',
"The identifier of the species")
self.value = self.readFloat(request, 'species_value',
"The value of the species", required=False)
self.isConcentration = self.readTrueFalse(request, 'species_value_type',
"the type of species")
self.compartment = self.readInt(request, 'species_compartment',
"The indice of the compartment of the species",
max_value=len(self.parent.listOfCompartments))
self.unit = self.readInt(request, 'species_unit',
"The indice of the unit of the species",
max_value=len(self.parent.listOfUnits),
required=False)
self.constant = self.readOnOff(request, 'species_constant',
"The constant property of the species")
self.boundaryCondition = self.readOnOff(request, 'species_boundary',
"The boundary condition property of the species")
self.SBOTerm = self.readInt(request, 'species_sboterm',
"The SBO term of the species",
required=False) | msreis/SigNetSim | signetsim/views/edit/ModelSpeciesForm.py | Python | agpl-3.0 | 3,480 |
import sys
import common.stuck
def init_yappi():
import atexit
import yappi
print('[YAPPI START]')
# yappi.set_clock_type('')
yappi.start()
@atexit.register
def finish_yappi():
print('[YAPPI STOP]')
yappi.stop()
print('[YAPPI WRITE]')
stats = yappi.get_func_stats()
for stat_type in ['pstat', 'callgrind', 'ystat']:
print('writing run_stats.{}'.format(stat_type))
stats.save('run_stats.{}'.format(stat_type), type=stat_type)
print('\n[YAPPI FUNC_STATS]')
print('writing run_stats.func_stats')
with open('run_stats.func_stats', 'w') as fh:
stats.print_all(out=fh)
print('\n[YAPPI THREAD_STATS]')
print('writing run_stats.thread_stats')
tstats = yappi.get_thread_stats()
with open('run_stats.thread_stats', 'w') as fh:
tstats.print_all(out=fh)
print('[YAPPI OUT]')
if __name__ == '__main__':
import FetchAgent.server
# init_yappi()
FetchAgent.server.main()
| fake-name/ReadableWebProxy | runFetchAgent.py | Python | bsd-3-clause | 918 |
#!/usr/bin/python
#
# input format:
# lines of verilog binary strings, e.g.
# 1001_10101_10011_1101
# comments beginning with # sign
# lines with just whitespace
#
# output format:
# a module that implements a rom
#
# usage: bsg_ascii_to_rom.py <filename> <modulename>
#
# to compress out zero entries with a default 0 setting:
#
# usage: bsg_ascii_to_rom.py <filename> <modulename> zero
#
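# For example (assumed input, not from an actual run): an input line "1010_0001"
# produces a case entry roughly like
#   0: data_o = width_p ' (8'b1010_0001); // 0xA1
#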
import sys;
import os;
import binascii;
zero = 0;
if ((len(sys.argv)==4) and sys.argv[3]=="zero") :
zero = 1;
if ((len(sys.argv)!=3) and (len(sys.argv)!=4)) :
print "Usage ascii_to_rom.py <filename> <modulename>";
exit -1
myFile = open(sys.argv[1],"r");
i = 0;
print "// auto-generated by bsg_ascii_to_rom.py from " + os.path.abspath(sys.argv[1]) + "; do not modify";
print "module " + sys.argv[2] + " #(`BSG_INV_PARAM(width_p), `BSG_INV_PARAM(addr_width_p))";
print "(input [addr_width_p-1:0] addr_i";
print ",output logic [width_p-1:0] data_o";
print ");";
print "always_comb case(addr_i)"
all_zero = set("0_");
for line in myFile.readlines() :
line = line.strip();
if (len(line)!=0):
if (line[0] != "#") :
if (not zero or not (set(line) <= all_zero)) :
digits_only = filter(lambda m:m.isdigit(), str(line));
# http://stackoverflow.com/questions/2072351/python-conversion-from-binary-string-to-hexadecimal
hstr = '%0*X' % ((len(digits_only) + 3) // 4, int(digits_only, 2))
print str(i).rjust(10)+": data_o = width_p ' (" + str(len(digits_only))+ "'b"+line+");"+" // 0x"+hstr;
i = i + 1;
else :
print " // " + line;
if (zero) :
print "default".rjust(10) + ": data_o = { width_p { 1'b0 } };"
else :
print "default".rjust(10) + ": data_o = 'X;"
print "endcase"
print "endmodule"
print "`BSG_ABSTRACT_MODULE(" + sys.argv[2] + ")"
| litex-hub/pythondata-cpu-blackparrot | pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/bsg_mem/bsg_ascii_to_rom.py | Python | bsd-3-clause | 1,921 |
#!/usr/bin/env python3
import os
import subprocess
prefix = os.environ.get('MESON_INSTALL_PREFIX', '/usr')
datadir = os.path.join(prefix, 'share')
# Packaging tools define DESTDIR and this isn't needed for them
if 'DESTDIR' not in os.environ:
print('Compiling gsettings schemas...')
schema_dir = os.path.join(datadir, 'glib-2.0/schemas')
subprocess.call(['glib-compile-schemas', schema_dir])
print('Updating icon cache...')
icon_cache_dir = os.path.join(datadir, 'icons', 'hicolor')
subprocess.call(['gtk-update-icon-cache', '-qtf', icon_cache_dir])
print('Updating desktop database...')
desktop_database_dir = os.path.join(datadir, 'applications')
subprocess.call(['update-desktop-database', '-q', desktop_database_dir])
| bitseater/weather | meson/post_install.py | Python | gpl-3.0 | 765 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-15 08:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0116_tag'),
]
operations = [
migrations.CreateModel(
name='Refund',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('re_id', models.CharField(max_length=27, unique=True)),
('order_no', models.CharField(max_length=27)),
('amount', models.IntegerField(default=0)),
('succeed', models.BooleanField(default=False)),
('status', models.CharField(max_length=10)),
('created', models.DateTimeField(blank=True, null=True)),
('time_succeed', models.DateTimeField(blank=True, null=True)),
('description', models.CharField(max_length=255)),
('failure_code', models.CharField(max_length=10)),
('failure_msg', models.CharField(max_length=30)),
('metadata', models.CharField(max_length=50)),
('transaction_no', models.CharField(max_length=40)),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='charge',
name='body',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='charge',
name='client_ip',
field=models.CharField(max_length=15),
),
migrations.AlterField(
model_name='charge',
name='currency',
field=models.CharField(max_length=3),
),
migrations.AlterField(
model_name='charge',
name='description',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='charge',
name='subject',
field=models.CharField(max_length=32),
),
migrations.AddField(
model_name='refund',
name='charge',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Charge'),
),
]
| malaonline/Server | server/app/migrations/0117_auto_20160315_1659.py | Python | mit | 2,385 |
#!/usr/bin/env python
"""This script solves the Project Euler problem "Prime pair sets". The problem
is: Find the lowest sum for a set of five primes for which any two primes
concatenate to produce another prime.
"""
from __future__ import division
import argparse
import math
from collections import defaultdict
def main(args):
"""Prime pair sets"""
limit = 10
min_set_sum = limit * limit
while min_set_sum == limit * limit:
limit *= 10
min_set_sum = limit * limit
primes = get_primes_up_to(limit)
pair = defaultdict(set)
for prime1 in primes:
for prime2 in primes:
if prime2 <= prime1:
continue
concat1 = int(str(prime1) + str(prime2))
concat2 = int(str(prime2) + str(prime1))
if is_prime(concat1) and is_prime(concat2):
pair[prime1].add(prime2)
try:
primes = sorted(pair.iterkeys())
except AttributeError:
primes = sorted(pair.keys())
for prime in primes:
if prime > min_set_sum:
break
min_set_sum = get_set(pair, pair[prime], [prime], args.set_size,
min_set_sum)
print(min_set_sum)
def get_set(pair, candidates, prime_set, set_size, min_set_sum):
"""Get minimum sum of intersecting sets"""
set_sum = sum(prime_set)
if len(prime_set) == set_size:
if set_sum < min_set_sum:
return(set_sum)
else:
return(min_set_sum)
for prime in sorted(candidates):
if set_sum + prime > min_set_sum:
return(min_set_sum)
intersect = candidates & pair[prime]
new_prime_set = list(prime_set)
new_prime_set.append(prime)
min_set_sum = get_set(pair, intersect, new_prime_set, set_size,
min_set_sum)
return(min_set_sum)
def get_primes_up_to(limit):
"""Get all primes up to specified limit"""
sieve_bound = (limit - 1) // 2 # Last index of sieve
sieve = [False for _ in range(sieve_bound)]
cross_limit = (math.sqrt(limit) - 1) // 2
i = 1
while i <= cross_limit:
if not sieve[i - 1]:
# 2 * $i + 1 is prime, so mark multiples
j = 2 * i * (i + 1)
while j <= sieve_bound:
sieve[j - 1] = True
j += 2 * i + 1
i += 1
primes = [2 * n + 1 for n in range(1, sieve_bound + 1) if not sieve[n - 1]]
primes.insert(0, 2)
return(primes)
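# Quick sanity check: get_primes_up_to(10) returns [2, 3, 5, 7].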
def is_prime(num):
"""Test if number is prime"""
if num == 1: # 1 isn't prime
return False
if num < 4: # 2 and 3 are prime
return True
if num % 2 == 0: # Even numbers aren't prime
return False
if num < 9: # 5 and 7 are prime
return True
if num % 3 == 0: # Numbers divisible by 3 aren't prime
return False
num_sqrt = int(math.sqrt(num))
factor = 5
while factor <= num_sqrt:
if num % factor == 0: # Primes greater than three are 6k-1
return False
if num % (factor + 2) == 0: # Or 6k+1
return False
factor += 6
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prime pair sets')
parser.add_argument(
'set_size', metavar='SET_SIZE', type=int, default=5, nargs='?',
help='The target number of primes in the set')
args = parser.parse_args()
main(args)
| iansealy/projecteuler | 60.py | Python | gpl-3.0 | 3,556 |
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
import sys
class FunctionalTest(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
for arg in sys.argv:
if 'liveserver' in arg:
cls.server_url = 'http://' + arg.split('=')[1]
return
super().setUpClass()
cls.server_url = cls.live_server_url
@classmethod
def tearDownClass(cls):
if cls.server_url == cls.live_server_url:
super().tearDownClass()
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def check_for_row_in_list_table(self, row_text):
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
| walkerSong/superlists | functional_tests/base.py | Python | gpl-2.0 | 914 |
import argparse
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
from aod_cells.schemata import *
# plot_params = dict(cmap=plt.cm.gray, vmin=0, vmax=1)
plot_params = dict(cmap=plt.cm.gray)
plot_paramsP = dict(cmap=sns.blend_palette(['yellow', 'deeppink'], as_cmap=True), zorder=5)
class CellLabeler:
def __init__(self, X, cells=None, P=None):
self.X = X
self.cells = cells
self.cell_idx = 0 if cells is not None else None
self.cut = OrderedDict(zip(['row', 'col', 'depth'], [0, 0, 0]))
self.P = 0 * self.X
if P is not None:
i, j, k = [(i - j + 1) // 2 for i, j in zip(self.X.shape, P.shape)]
self.P[i:-i, j:-j, k:-k] = P
fig = plt.figure(facecolor='w')
gs = plt.GridSpec(3, 5)
ax = dict()
ax['depth'] = fig.add_subplot(gs[1:3, :2])
ax['row'] = fig.add_subplot(gs[0, :2], sharex=ax['depth'])
ax['col'] = fig.add_subplot(gs[1:3, 2], sharey=ax['depth'])
ax['3d'] = fig.add_subplot(gs[1:3, 3:], projection='3d')
self.fig, self.ax = fig, ax
self.fig.canvas.mpl_connect('scroll_event', self.on_scroll)
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('key_press_event', self.on_key)
self.replot()
plt.show()
def replot(self):
X0 = self.X
P0 = np.asarray(self.P)
P0[P0 < 0.005] = np.nan
row, col, depth = self.cut.values()
nr, nc, nd = self.X.shape[:3]
fig, ax = self.fig, self.ax
for a in ax.values():
a.clear()
color = 'red'
if self.cells is not None and len(self.cells) > 0:
out = np.asarray(list(self.cut.values()), dtype=int)
d = np.sqrt(((self.cells - out) ** 2).sum(axis=1))
if np.any(d <= 5):
color = 'dodgerblue'
ax['row'].imshow(X0[row, :, :].T, **plot_params)
ax['row'].imshow(P0[row, :, :].T, **plot_paramsP)
ax['row'].plot([0, nc], [depth, depth], '-', lw=.5, zorder=10, color=color)
ax['row'].plot([col, col], [0, nd], '-', lw=.5, zorder=10, color=color)
ax['row'].axis('tight')
ax['row'].set_aspect('equal')
ax['row'].axis('off')
ax['row'].set_xlim((0, nc))
ax['row'].set_title('col-depth plane')
ax['col'].imshow(X0[:, col, :], **plot_params)
ax['col'].imshow(P0[:, col, :], **plot_paramsP)
ax['col'].plot([depth, depth], [0, nr], '-', lw=.5, zorder=10, color=color)
ax['col'].plot([0, nd], [row, row], '-', lw=.5, zorder=10, color=color)
ax['col'].axis('tight')
ax['col'].set_aspect('equal')
ax['col'].axis('off')
ax['col'].set_ylim((0, nr))
ax['col'].set_title('row-depth plane')
ax['depth'].imshow(X0[:, :, depth], **plot_params)
ax['depth'].imshow(P0[:, :, depth], **plot_paramsP)
ax['depth'].plot([col, col], [0, nr], '-', lw=.5, zorder=10, color=color)
ax['depth'].plot([0, nc], [row, row], '-', lw=.5, zorder=10, color=color)
ax['depth'].axis('tight')
ax['depth'].set_xlim((0, nc))
ax['depth'].set_ylim((0, nr))
ax['depth'].set_aspect('equal')
ax['depth'].axis('off')
ax['depth'].set_title('row-col plane')
if self.cells is not None and len(self.cells) > 0:
c = self.cells
dz = np.abs(c[:, 2] - out[2]) / 5
dz = dz * (dz <= 1)
for cc, alpha in zip(c[dz > 0], 1 - dz[dz > 0]):
ax['depth'].plot(cc[1], cc[0], 'ok', mfc='dodgerblue', alpha=alpha)
idx = c[:, 2] == depth
if np.any(idx):
ax['depth'].plot(c[idx, 1], c[idx, 0], 'ok', mfc='deeppink', alpha=0.5)
idx = c[:, 0] == row
if np.any(idx):
ax['row'].plot(c[idx, 1], c[idx, 2], 'ok', mfc='deeppink', alpha=0.5)
idx = c[:, 1] == col
if np.any(idx):
ax['col'].plot(c[idx, 2], c[idx, 0], 'ok', mfc='deeppink', alpha=0.5)
ax['3d'].plot(c[:, 0], c[:, 1], c[:, 2], 'ok', mfc='deeppink')
ax['3d'].plot([row, row], [0, nc], [depth, depth], '--', lw=2, color=color)
ax['3d'].plot([row, row], [col, col], [0, nd], '--', lw=2, color=color)
ax['3d'].plot([0, nr], [col, col], [depth, depth], '--', lw=2, color=color)
plt.draw()
def _determine_axes(self, event):
for k, v in self.ax.items():
if event.inaxes == v:
return k
def on_scroll(self, event):
what = self._determine_axes(event)
dimensions = list(self.cut.keys())
if what in dimensions:
i = dimensions.index(what)
k = self.cut[what] + event.step
k = min(self.X.shape[i], max(k, 0))
self.cut[what] = k
self.replot()
def on_key(self, event):
if event.key in ['t', 'r', 'e']:
if event.key == 'e':
self.cell_idx = max(0, self.cell_idx - 1)
elif event.key == 't':
self.cell_idx = min(len(self.cells) - 1, self.cell_idx + 1)
for k, i in zip(self.cut, self.cells[self.cell_idx, :]):
self.cut[k] = i
# if event.key == 's':
# fname = input('Please enter filename:')
# print('Saving')
# self.stack.cells = self.cells
# self.stack.save(fname)
# self.fig.suptitle('File saved to %s' % (fname,))
if event.key == 'a':
new_cell = np.asarray(list(self.cut.values()), dtype=int)
print('Adding new cell at', new_cell)
self.cells = np.vstack((self.cells, new_cell))
self.fig.suptitle('New cell added')
self.replot()
def on_press(self, event):
what = self._determine_axes(event)
if what == 'depth':
self.cut['row'], self.cut['col'] = int(event.ydata), int(event.xdata)
elif what == 'row':
self.cut['depth'], self.cut['col'] = int(event.ydata), int(event.xdata)
elif what == 'col':
self.cut['depth'], self.cut['row'] = int(event.xdata), int(event.ydata)
if what is not None:
if event.button == 1:
new_cell = np.asarray(list(self.cut.values()), dtype=int)
print('Adding new cell at', new_cell)
if self.cells is None:
self.cells = new_cell[None, :]
else:
self.cells = np.vstack((self.cells, new_cell))
if event.button == 3:
out = np.asarray(list(self.cut.values()), dtype=int)
d = abs(self.cells - out).sum(axis=1)
self.cells = self.cells[d > 3, :]
self.replot()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Manually label cells in a stack.')
# parser.add_argument('file', type=str, help='hdf5 file containing the stack (dims row, col, depth, 1, channels)')
# parser.add_argument('--probability', type=str, help='numpy file containing the probability map for file')
#
# args = parser.parse_args()
# s = Stack(args.file,
# preprocessor=lambda x: average_channels(whiten(unsharp_masking(medianfilter(center(x.squeeze()))))))
# if args.probability:
# P = np.load(args.probability)
# else:
# P = None
stacks = Stacks().project().fetch.as_dict()
for i, key in enumerate(stacks):
print(i, '\t'.join(key.values()))
key = stacks[int(input('Please select dataset: '))]
cells = (CellLocations() & key).project().fetch.as_dict()
if len(cells) > 0:
for i, ckey in enumerate(cells):
print(i, '\t'.join(ckey.values()))
selection = input('Do you want to load a set of locations? [press enter for no] ')
if len(selection) > 0:
key = cells[int(selection)]
cells = (CellLocations() & key).fetch1['cells']
else:
cells = None
prep = list(preprocessors.keys())
for i, name in enumerate(prep):
print(i, name)
key['preprocessing'] = prep[int(input('Please select the preprocessing. '))]
X = Stacks().load(key)
labeler = CellLabeler(X, cells)
| cajal/cell_detector | scripts/label_cells.py | Python | mit | 8,354 |
#!/usr/bin/python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
f = open(sys.argv[1] + ".cc", "w")
f.write("""\
#include <stdio.h>
int main() {
puts("Hello %s");
return 0;
}
""" % sys.argv[1])
f.close()
| dtebbs/gyp | test/rules/src/rule.py | Python | bsd-3-clause | 331 |
#===islucyplugin===
# -*- coding: utf-8 -*-
# note_plugin.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sqlite3 as db
import time, os, random
def querynote(dbpath,query):
cursor,connection = None, None
try:
connection=db.connect(dbpath)
cursor=connection.cursor()
cursor.execute(query)
result=cursor.fetchall()
connection.commit()
cursor.close()
connection.close()
return result
except:
if cursor:
cursor.close()
if connection:
connection.commit()
connection.close()
return ''
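# Example of how querynote is used below (hypothetical database path):
#   querynote('settings/room/notes.db', 'SELECT jid FROM notes;')
# returns a list of row tuples from fetchall(), or '' if the query fails.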
def get_jid(gch, nick):
nick = nick.replace('"','"')
sql = 'SELECT jid FROM users WHERE nick="%s";' % (nick)
qres = querynote('settings/'+gch+'/users.db',sql)
if qres:
jid = qres[0][0]
return jid
def show_notes(gch, notes, pref='',miff='',start=0,end=10):
rng = []
if notes:
if start == 0 and end == 10:
if len(notes) >= 10:
rng = range(10)
else:
rng = range(len(notes))
else:
rng = range(end-start)
nosli = ['%s) %s%s%s%s:\n%s' % (li+start+1,pref,time.strftime('%d.%m.%Y',time.localtime(float(notes[li+start][0]))),miff,time.strftime('%H:%M:%S',time.localtime(float(notes[li+start][0]))), notes[li+start][1].replace('"','"')) for li in rng]
return nosli
def del_note(gch, notes_id, note):
del_sql = 'DELETE FROM %s WHERE note="%s";' % (notes_id, note)
res=querynote('settings/'+gch+'/notes.db',del_sql)
return res
def delall_notes(gch, notes_id):
drop_sql = 'DROP TABLE %s;' % (notes_id)
res=querynote('settings/'+gch+'/notes.db',drop_sql)
return res
def get_notes(gch,notes_id):
sql = 'SELECT * FROM %s ORDER BY ndate DESC;' % (notes_id)
notes = querynote('settings/'+gch+'/notes.db',sql)
return notes
def check_notes_id(gch,notes_id):
sql = 'SELECT * FROM notes WHERE id="%s";' % (notes_id)
qres = querynote('settings/'+gch+'/notes.db',sql)
if qres:
return False
else:
return True
def get_notes_id(gch,jid):
sql = 'SELECT id FROM notes WHERE jid="%s";' % (jid)
notes_id = querynote('settings/'+gch+'/notes.db',sql)
if notes_id:
return notes_id[0][0]
def note_add(gch,jid,note,notes_id=''):
if not notes_id:
notes_id = 'notes'+str(random.randrange(10000000, 99999999))
chk_ntsid = check_notes_id(gch,notes_id)
while not chk_ntsid:
notes_id = 'notes'+str(random.randrange(10000000, 99999999))
chk_ntsid = check_notes_id(gch,notes_id)
sql = 'INSERT INTO notes (jid,id) VALUES ("%s","%s");' % (jid,notes_id)
res = querynote('settings/'+gch+'/notes.db',sql)
sql = 'CREATE TABLE %s (ndate varchar not null, note varchar not null, unique(note));' % (notes_id)
res = querynote('settings/'+gch+'/notes.db',sql)
note = note.replace(r'"', r'"')
date = time.time()
sql = 'INSERT INTO %s (ndate,note) VALUES ("%s","%s");' % (notes_id,date,note)
res = querynote('settings/'+gch+'/notes.db',sql)
if res == '':
sql = 'CREATE TABLE %s (ndate varchar not null, note varchar not null, unique(note));' % (notes_id)
res = querynote('settings/'+gch+'/notes.db',sql)
sql = 'INSERT INTO %s (ndate,note) VALUES ("%s","%s");' % (notes_id,date,note)
res = querynote('settings/'+gch+'/notes.db',sql)
return res
def get_note_state(gch):
if not os.path.exists('settings/'+gch+'/notes.db'):
sql = 'CREATE TABLE notes (jid varchar not null, id varchar not null, unique(jid,id));'
res = querynote('settings/'+gch+'/notes.db',sql)
def handler_notes(type, source, parameters, recover=False, jid='', rcts=''):
groupchat = source[1]
nick = source[2]
if not GROUPCHATS.has_key(groupchat):
reply(type, source, u'This command can be used only in the conference!')
return
jid = get_jid(groupchat,nick)
notes_id = get_notes_id(groupchat,jid)
notes = get_notes(groupchat,notes_id)
tonos = len(notes)
#-----------------------Local Functions--------------
def add_note(type,source,groupchat,jid,parameters,notes_id):
res = note_add(groupchat,jid,parameters,notes_id)
if res != '':
reply(type, source, u'Successfully added!')
else:
            reply(type, source, u'Failed adding! Perhaps this note already exists!')
def out_notes(type,source,groupchat,notes,tonos,stn,enn):
        notl = show_notes(groupchat, notes, u'Recorded ',u' at ',stn-1,enn)
head = ''
foot = ''
if stn >= 2 and stn != enn:
head = u'[<---beginning---]\n\n'
if enn < tonos and stn != enn:
foot = u'\n\n[---ending--->]'
elif enn == tonos and tonos == 10:
foot = ''
if notl:
if type == 'public':
if stn == enn:
rep = u'Note (total: %s):\n%s%s%s' % (tonos,head,'\n\n'.join(notl),foot)
else:
rep = u'Notes (total: %s):\n%s%s%s' % (tonos,head,'\n\n'.join(notl),foot)
reply(type, source, u'Look in private!')
reply('private', source, rep)
else:
if stn == enn:
rep = u'Note (total: %s):\n%s%s%s' % (tonos,head,'\n\n'.join(notl),foot)
else:
rep = u'Notes (total: %s):\n%s%s%s' % (tonos,head,'\n\n'.join(notl),foot)
reply(type, source, rep)
else:
            rep = u'No notes!'
reply(type, source, rep)
#--------------------End Of Local Functions----------
if parameters:
spltdp = parameters.split(' ',1)
nnote = spltdp[0]
if len(spltdp) == 1:
if '-' in nnote:
nnote = nnote.split('-',1)
nnote = [li for li in nnote if li != '']
if len(nnote) == 2:
if nnote[0].isdigit():
stn = int(nnote[0])
if not stn:
add_note(type,source,groupchat,jid,parameters,notes_id)
return
else:
add_note(type,source,groupchat,jid,parameters,notes_id)
return
if nnote[1].isdigit():
enn = int(nnote[1])
if enn > tonos:
add_note(type,source,groupchat,jid,parameters,notes_id)
return
else:
add_note(type,source,groupchat,jid,parameters,notes_id)
return
if stn > enn:
add_note(type,source,groupchat,jid,parameters,notes_id)
return
out_notes(type,source,groupchat,notes,tonos,stn,enn)
elif len(nnote) == 1:
if nnote[0].isdigit():
nno = int(nnote[0])
if nno > tonos or nno == 0:
add_note(type,source,groupchat,jid,parameters,notes_id)
return
note = notes[nno-1][1].strip()
res = del_note(groupchat, notes_id, note)
if res != '':
reply(type, source, u'Note number %s deleted!' % (nno))
else:
reply(type, source, u'Error deletion!')
else:
add_note(type,source,groupchat,jid,parameters,notes_id)
elif not nnote:
delall_notes(groupchat, notes_id)
reply(type, source, u'Notes cleaned!')
else:
if nnote.isdigit():
if int(nnote) != 0 and int(nnote) <= tonos:
nnote = int(nnote)
out_notes(type,source,groupchat,notes,tonos,nnote,nnote)
else:
add_note(type,source,groupchat,jid,parameters,notes_id)
else:
add_note(type,source,groupchat,jid,parameters,notes_id)
else:
add_note(type,source,groupchat,jid,parameters,notes_id)
else:
out_notes(type,source,groupchat,notes,tonos,1,10)
register_stage1_init(get_note_state)
register_command_handler(handler_notes, 'note', ['info','fun','all','*'], 11, 'Allows users (with access 11 and above) to keep personal notes. Without arguments displays the first 10 notes, if more than 10, or all if less than 10 or 10 if it is only 10. When you specify a number displays the article with that number: %snote 4. When you specify a minus sign before the number, delete a note of this number, example: %snote -7. When you specify a range in the format <beginning>-<ending>, displays notes beginning with a specified boundary <beginning> and to the number specified boundary <ending>, example: %snote 3-8. When you specify the text, adds a new note, example: %snote Sample notes.', 'note [<number>|<Beginning>-<ending>|-<number>|<text>]', ['note','note 5','note 3-7','note -4','note Note, anything.']) | XtremeTeam/Lucy-bot | brain/plugins/note.py | Python | gpl-2.0 | 8,369 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-06-18 18:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gwasdb', '0004_remove_snp_name'),
]
operations = [
migrations.RemoveField(
model_name='association',
name='snp',
),
migrations.RemoveField(
model_name='association',
name='study',
),
migrations.RemoveField(
model_name='snp',
name='gene',
),
migrations.RemoveField(
model_name='snp',
name='genotype',
),
migrations.DeleteModel(
name='Association',
),
migrations.DeleteModel(
name='Gene',
),
migrations.DeleteModel(
name='SNP',
),
]
| 1001genomes/AraGWAS | aragwas_server/gwasdb/migrations/0005_auto_20170618_1847.py | Python | mit | 908 |
# encoding: utf-8
"""
API classes for dealing with presentations and other objects one typically
encounters as an end-user of the PowerPoint user interface.
"""
from __future__ import absolute_import
import os
from pptx.opc.constants import RELATIONSHIP_TYPE as RT
from pptx.opc.package import OpcPackage
from pptx.parts.coreprops import CoreProperties
from pptx.parts.image import ImageCollection
from pptx.util import lazyproperty
class Package(OpcPackage):
"""
Return an instance of |Package| loaded from *file*, where *file* can be a
path (a string) or a file-like object. If *file* is a path, it can be
either a path to a PowerPoint `.pptx` file or a path to a directory
containing an expanded presentation file, as would result from unzipping
a `.pptx` file. If *file* is |None|, the default presentation template is
loaded.
"""
# path of the default presentation, used when no path specified
_default_pptx_path = os.path.join(
os.path.split(__file__)[0], 'templates', 'default.pptx'
)
def after_unmarshal(self):
"""
Called by loading code after all parts and relationships have been
loaded, to afford the opportunity for any required post-processing.
"""
# gather image parts into _images
self._images.load(self.parts)
@lazyproperty
def core_properties(self):
"""
Instance of |CoreProperties| holding the read/write Dublin Core
document properties for this presentation. Creates a default core
properties part if one is not present (not common).
"""
try:
return self.part_related_by(RT.CORE_PROPERTIES)
except KeyError:
core_props = CoreProperties.default()
self.relate_to(core_props, RT.CORE_PROPERTIES)
return core_props
@classmethod
def open(cls, pkg_file=None):
"""
Return |Package| instance loaded with contents of .pptx package at
*pkg_file*, or the default presentation package if *pkg_file* is
missing or |None|.
"""
if pkg_file is None:
pkg_file = cls._default_pptx_path
return super(Package, cls).open(pkg_file)
@property
def presentation(self):
"""
Reference to the |Presentation| instance contained in this package.
"""
return self.main_document
@lazyproperty
def _images(self):
"""
Collection containing a reference to each of the image parts in this
package.
"""
return ImageCollection()
| Hitachi-Data-Systems/org-chart-builder | pptx/package.py | Python | apache-2.0 | 2,601 |
#encoding: utf-8
"""Tornado handlers for the terminal emulator."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import tornado
from tornado import web
import terminado
from ..base.handlers import IPythonHandler
class TerminalHandler(IPythonHandler):
"""Render the terminal interface."""
@web.authenticated
def get(self, term_name):
self.write(self.render_template('terminal.html',
ws_path="terminals/websocket/%s" % term_name))
class TermSocket(terminado.TermSocket, IPythonHandler):
def get(self, *args, **kwargs):
if not self.get_current_user():
raise web.HTTPError(403)
return super(TermSocket, self).get(*args, **kwargs)
def clear_cookie(self, *args, **kwargs):
"""meaningless for websockets"""
pass
| madelynfreed/rlundo | venv/lib/python2.7/site-packages/IPython/html/terminal/handlers.py | Python | gpl-3.0 | 861 |
import numpy as np
import json
import datetime
import os, glob
from SEDMDb.SedmDb import SedmDB
from pandas import DataFrame
import psycopg2
from psycopg2 import extras
from sqlalchemy.exc import IntegrityError
import requests
import SEDMrph.fitsutils as fitsutils
db = SedmDB(host='localhost', dbname='sedmdb')
def jsonify(dbout, names):
"""
    Receives a response from an SQL DB and converts it into a JSON-like list of dicts using the names provided.
"""
if len(dbout) ==0:
return {}
elif len(dbout[0]) != len(names):
return {"message": "Lenght of SQL response different from length of names."}
else:
json = []
for o in dbout:
j = {}
for i, n in enumerate(names):
j[n] = o[i]
json.append(j)
return json
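# Illustrative usage sketch (added for clarity, values are hypothetical):
#   jsonify([(1, 'PTF18abc')], ["id", "name"])  ->  [{"id": 1, "name": "PTF18abc"}]
# i.e. each SQL row tuple is zipped with the provided column names into a dict.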
def updateFollowupConfig(entryUpdate):
message = ""
if not "Followup" in entryUpdate or not entryUpdate['Followup']:
message += "STATUS: 422 Invalid Input\n\nFollowup option not specified"
return
message += "Content-type: text/html"
message += "\n"
message += "<title>Test CGI</title>"
output = open('/home/sedm/kpy/flask/static/test_output%s.txt' % datetime.datetime.utcnow().strftime("%H_%M_%S"),'w')
allFields = ""
data = json.dumps(entryUpdate)
#for k,v in entryUpdate:
# allFields += k + ":" + v + "\n "
# output.write(allFields)
output.write(data)
output.close()
return message
def get_request_by_id(request_id):
"""
Grab pertinent information on the request id and create a page that can
be used to update the request
:param request_id:
:return:
"""
request_query = """SELECT r.id as req_id, r.object_id as obj_id,
r.user_id, r.marshal_id, r.exptime, r.maxairmass,
r.max_fwhm, r.min_moon_dist, r.max_moon_illum,
r.max_cloud_cover, r.status,
r.priority as reqpriority, r.inidate, r.enddate,
r.cadence, r.phasesamples, r.sampletolerance,
r.filters, r.nexposures, r.obs_seq, r.seq_repeats,
r.seq_completed, r.last_obs_jd, r.creationdate,
r.lastmodified, r.allocation_id, r.marshal_id,
o.id as objid, o.name as objname, o.iauname, o.ra, o."dec",
o.typedesig, o.epoch, o.magnitude, o.creationdate,
u.id as user_id, u.email, a.id as all_id, a.inidate, a.enddate, a.time_spent,
a.time_allocated, a.program_id, a.active,
p.designator, p.name, p.group_id, p.pi,
p.time_allocated, r.priority, p.inidate,
p.enddate
FROM "public".request r
INNER JOIN "public"."object" o ON ( r.object_id = o.id )
INNER JOIN "public".users u ON ( r.user_id = u.id )
INNER JOIN "public".allocation a ON ( r.allocation_id = a.id )
INNER JOIN "public".program p ON ( a.program_id = p.id )
WHERE r.id = %s
""" % request_id
conn = psycopg2.connect("dbname=sedmdb user=sedmuser "
"host=localhost "
"password=user$edm1235")
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(request_query)
results = cursor.fetchone()
if isinstance(results, psycopg2.extras.DictRow):
obs_dict = parse_obs(results['obs_seq'], results['exptime'])
else:
obs_dict = ''
return results, obs_dict
def parse_obs(sequence, exptime_sequence):
"""
Parse all available filters
:param seq:
:param exptime:
:return:
"""
flt_list = ['ifu', 'r', 'g', 'i', 'u']
seq = list(sequence)
exptime = list(exptime_sequence)
obs_dict = {}
print seq, exptime
for flt in flt_list:
index = [i for i, s in enumerate(seq) if flt in s]
if index:
obs_dict['%s_checked' % flt] = 'checked'
obs_dict['%s_exptime' % flt] = exptime[index[0]]
obs_dict['%s_repeat' % flt] = int(seq[index[0]].replace(flt,""))
for i in index:
seq.pop(index[0])
exptime.pop(index[0])
else:
obs_dict['%s_checked' % flt] = ''
obs_dict['%s_exptime' % flt] = 0
obs_dict['%s_repeat' % flt] = 0
return obs_dict
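# Illustrative usage sketch (added for clarity, values are hypothetical): a stored
# sequence ['1ifu', '2r'] with exposure times [2700, 60] would yield
#   {'ifu_checked': 'checked', 'ifu_exptime': 2700, 'ifu_repeat': 1,
#    'r_checked': 'checked', 'r_exptime': 60, 'r_repeat': 2, ...}
# with the remaining filters (g, i, u) unchecked and zeroed.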
def delete_request_by_id(id):
"""
:param id:
:return:
"""
obs_dict = {'id': int(id),
'status': 'CANCELED'}
ret = db.update_request(obs_dict)
print ret
return "Canceled"
def parse_update(update_dict):
"""
:param update_dict:
:return:
"""
if update_dict['type'] == 'priority':
print int(update_dict['id'])
print db.update_request({'id': int(update_dict['id']),
'priority': update_dict['priority']})
elif update_dict['type'] == 'status':
print db.update_request({'id': int(update_dict['id']),
'status': update_dict['status']})
elif update_dict['type'] == 'filters':
pass
return {'response': True}
def search_stats_file(mydate = None):
'''
Returns the last stats file that is present in the system according to the present date.
It also returns a message stating what date that was.
'''
    #If the date is specified, we will try to locate the right file.
#None will be returned if it does not exist.
if ( not mydate is None):
s= os.path.join("/scr2/sedm/phot", mydate, "stats/stats.log")
if os.path.isfile(s) and os.path.getsize(s) > 0:
return s, mydate
else:
return None, None
else:
curdate = datetime.datetime.utcnow()
#Try to find the stat files up to 100 days before today's date.
i = 0
while i < 100:
newdate = curdate - datetime.timedelta(i)
newdatedir = "%d%02d%02d"%(newdate.year, newdate.month, newdate.day)
s = os.path.join("/scr2/sedm/phot", newdatedir, "stats/stats.log")
if os.path.isfile(s) and os.path.getsize(s) > 0:
return s, newdatedir
i = i+1
return None, None
def search_redux_files(mydate = None, user=None):
'''
    Returns the files that are present on disk for a given date.
    It also returns a message stating what date that was.
    TODO: This routine, which looks at the disk, should instead query the database and retrieve only the files
    that correspond to the privileges of the user.
    '''
    #If the date is specified, we will try to locate the right file.
#None will be returned if it does not exist.
if ( not mydate is None):
basedir = os.path.join("/scr2/sedmdrp/redux/", mydate)
patterns = ["*SEDM.txt", "image*png", "*_SEDM*png", "cube*png", "Standard_Correction.png", \
"spec_*.png", "spec_*.txt", "ifu_spaxels*.png", "*flat3d.png", "*wavesolution_dispersionmap.png"]
files = []
for p in patterns:
files.extend(glob.glob(os.path.join(basedir, p)))
if len(files) == 0:
files = None
else:
curdate = datetime.datetime.utcnow()
#Try to find the stat files up to 100 days before today's date.
i = 0
files = None
while i < 100:
newdate = curdate - datetime.timedelta(i)
newdatedir = "%d%02d%02d"%(newdate.year, newdate.month, newdate.day)
basedir = os.path.join("/scr2/sedmdrp/redux/", newdatedir)
patterns = ["*SEDM.txt", "image*png", "*_SEDM*png", "cube*png", "Standard_Correction.png", \
"spec_*.png", "spec_*.txt", "ifu_spaxels*.png", "*flat3d.png", "*wavesolution_dispersionmap.png"]
files = []
for p in patterns:
files.extend(glob.glob(os.path.join(basedir, p)))
if len(files) > 0:
mydate = newdatedir
break
i = i+1
if not files is None:
filenames = [os.path.basename(f) for f in files]
d = {'filename':filenames}
df = DataFrame.from_records(d)
else:
df = None
return df, mydate
def search_phot_files(mydate = None, user=None):
'''
    Returns the files that are present on disk for a given date.
    It also returns a message stating what date that was.
    TODO: This routine, which looks at the disk, should instead query the database and retrieve only the files
    that correspond to the privileges of the user.
    '''
    #If the date is specified, we will try to locate the right file.
#None will be returned if it does not exist.
if ( not mydate is None):
files = glob.glob(os.path.join("/scr2/sedm/phot/", mydate, "reduced/png/*.png"))
filesraw = glob.glob(os.path.join("/scr2/sedm/phot/", mydate, "pngraw/*all.png"))
files = files + filesraw
if len(files) == 0:
files = None
else:
curdate = datetime.datetime.utcnow()
#Try to find the stat files up to 100 days before today's date.
i = 0
files = []
while i < 100:
newdate = curdate - datetime.timedelta(i)
newdatedir = "%d%02d%02d"%(newdate.year, newdate.month, newdate.day)
files = glob.glob(os.path.join("/scr2/sedm/phot", newdatedir, "reduced/png/*.png"))
filesraw = glob.glob(os.path.join("/scr2/sedm/phot", newdatedir, "pngraw/*all.png"))
files = files + filesraw
if len(files) > 0:
mydate = newdatedir
break
i = i+1
if not files is None:
files.sort(reverse=True)
d = {'filename':files}
df = DataFrame.from_records(d)
else:
df = None
return df, mydate
def search_finder_files(mydate = None, user=None):
'''
    Returns the files that are present on disk for a given date.
    It also returns a message stating what date that was.
    TODO: This routine, which looks at the disk, should instead query the database and retrieve only the files
    that correspond to the privileges of the user.
    '''
    #If the date is specified, we will try to locate the right file.
#None will be returned if it does not exist.
if ( not mydate is None):
files = glob.glob(os.path.join("/scr2/sedm/phot", mydate, "finders/*ACQ*.jpg"))
files = files + glob.glob(os.path.join("/scr2/sedm/phot", mydate, "finders/*ACQ*.png"))
if len(files) == 0:
files = None
else:
curdate = datetime.datetime.utcnow()
#Try to find the stat files up to 100 days before today's date.
i = 0
files = []
while i < 100:
newdate = curdate - datetime.timedelta(i)
newdatedir = "%d%02d%02d"%(newdate.year, newdate.month, newdate.day)
files = glob.glob(os.path.join("/scr2/sedm/phot", newdatedir, "finders/*ACQ*.jpg"))
files = files + glob.glob(os.path.join("/scr2/sedm/phot", newdatedir, "finders/*ACQ*.png"))
if len(files) > 0:
mydate = newdatedir
break
i = i+1
if not files is None:
files.sort(reverse=True)
d = {'filename':files}
df = DataFrame.from_records(d)
else:
df = None
return df, mydate
def search_phot_files_by_imtype(mydate = None, user=None):
'''
    Returns the files that are present on disk for a given date.
    It also returns a message stating what date that was.
    TODO: This routine, which looks at the disk, should instead query the database and retrieve only the files
    that correspond to the privileges of the user.
    '''
    #If the date is specified, we will try to locate the right file.
#None will be returned if it does not exist.
filedic = {}
files = glob.glob(os.path.join("/scr2/sedm/phot", mydate, "rc*.fits"))
for f in files:
imtype = fitsutils.get_par(f, "IMGTYPE").title()
prev = filedic.get(imtype, [])
path, fits = os.path.split(f)
prev.extend([os.path.join(path, "pngraw", fits.replace(".fits", "_all.png") )])
filedic[imtype] = prev
return filedic
def get_requests_for_user(user_id, inidate, enddate):
'''
Obtains the DataFrame for the requests that were made:
    - By any member of the group to which the user with user_id belongs.
    - Within the given date range (inidate to enddate).
'''
request_query = ("""SELECT a.designator, o.name, o.ra, o.dec, r.inidate, r.enddate, r.priority, r.status, r.lastmodified, r.id
FROM request r, object o, allocation a
WHERE o.id = r.object_id AND a.id = r.allocation_id
AND ( r.lastmodified >= DATE('%s') AND r.lastmodified <= DATE('%s') )
AND r.allocation_id IN
(SELECT a.id
FROM allocation a, groups g, usergroups ug, users u, program p
WHERE ug.user_id = u.id AND ug.group_id = g.id AND u.id = %d AND p.group_id = g.id AND a.program_id = p.id
) ORDER BY r.lastmodified DESC;"""% (inidate, enddate, user_id))
req = db.execute_sql(request_query)
req = DataFrame(req, columns=['allocation', 'object', 'RA', 'DEC', 'start date', 'end date', 'priority','status', 'lastmodified', 'UPDATE'])
if user_id in [189, 2, 20180523190352189]:
req['UPDATE'] = req['UPDATE'].apply(convert_to_link)
#req['object'] = req['object'].apply(convert_to_growth_link)
else:
        req = req.drop(columns=['UPDATE', 'RA', 'DEC'])
return req
def convert_to_link(reqid):
return """<a href='view_request?id=%s'>+</a>""" % reqid
def convert_to_growth_link(reqid):
return """<a href='http://skipper.caltech.edu:8080/cgi-bin/growth/view_source.cgi?name=%s'>%s</a>""" % (reqid, reqid)
def get_info_user(username):
'''
Returns a json dictionary with the values for the user.
The fields are:
- message: what message we can get from the DB.
- username:
- name:
- email:
- user_id
- list of active allocations of the user
- list of current groups the user belongs to
- list of groups the user does not belong to
'''
if (username==""):
user_info = None
elif ('"' in username):
username = username.split('"')[1]
user_info = db.execute_sql("SELECT username, name, email, id FROM users WHERE username ='{0}'".format(username))
elif ("'" in username):
username = username.split("'")[1]
user_info = db.execute_sql("SELECT username, name, email, id FROM users WHERE username ='{0}'".format(username))
else:
userlower = username.lower()
user_info = db.execute_sql("SELECT username, name, email, id FROM users WHERE LOWER(username) LIKE '%{0}%' OR LOWER(name) LIKE '%{1}%' OR LOWER(email) LIKE '%{2}%@%'".format(userlower, userlower, userlower))
if (not user_info is None and len(user_info)==1):
user_info = user_info[0]
username = user_info[0]
name = user_info[1]
email = user_info[2]
user_id = user_info[3]
user_allocations = db.execute_sql("""SELECT g.designator, p.designator, a.designator, a.inidate, a.enddate
FROM allocation a, program p, groups g, users u, usergroups ug
WHERE ug.user_id = u.id AND ug.group_id = g.id
AND a.program_id=p.id AND p.group_id = g.id AND a.active=TRUE
AND u.id = %d order by g.designator; """%user_id)
allocations = jsonify(user_allocations, names=["group", "program", "allocation", "inidate", "enddate"])
old_groups = db.execute_sql("""SELECT DISTINCT g.designator
FROM groups g, usergroups ug, users u
WHERE ug.user_id = u.id AND ug.group_id=g.id AND ug.user_id = u.id AND u.id=%d
ORDER BY g.designator;"""%user_id)
new_groups = db.execute_sql("""SELECT DISTINCT g.designator
FROM groups g
WHERE g.id NOT IN
(SELECT DISTINCT group_id
FROM usergroups ug
WHERE ug.user_id=%d) ORDER BY g.designator;"""%user_id)
u = { "message":"User found.", "username":username, "name":name, "email":email, "id":user_id, \
"allocations":allocations, "old_groups":old_groups, "new_groups":new_groups}
elif user_info is None or len(user_info)==0:
u = {"message": "Found no user with your search criteria. Try with a different name / username / email username."}
elif len( user_info) > 1:
users = [u[0] for u in user_info]
u = {"message": "Found %d users with same search criteria. Use \"\" for exact search. Choose one from: %s"%(len(user_info), users)}
else:
u = {"message": "Other issue when searching for the user. %d user found"%len(user_info)}
return u
def add_group(user_id, g_descriptor):
'''
    Adds the association between the user and the group in the table usergroups.
'''
group_id= db.execute_sql("SELECT id FROM groups WHERE designator='{0}';".format(g_descriptor))[0][0]
db.execute_sql("INSERT INTO usergroups values (%d, %d);"%(user_id, group_id))
def remove_group(user_id, g_descriptor):
'''
    Removes the association between the user and the group in the table usergroups.
'''
group_id= db.execute_sql("SELECT id FROM groups WHERE designator='{0}';".format(g_descriptor))[0][0]
db.execute_sql("DELETE FROM usergroups ug WHERE ug.user_id=%d AND ug.group_id=%d;"%(user_id, group_id))
def get_p18obsdata(obsdate):
"""
:param obsdate: Must be in "Year-Month-Day" or "YYYYMMDD" format
:return: List of dates and average seeing
"""
#1. Create the URL to get the seeing for the requested night
p18date = []
p18seeing = []
if "-" in obsdate:
f = datetime.datetime.strptime(obsdate, "%Y-%m-%d") - datetime.timedelta(days=1)
else:
f = datetime.datetime.strptime(obsdate, "%Y%m%d") - datetime.timedelta(days=1)
y, m, d = [f.strftime("%Y"), int(f.strftime("%m")), int(f.strftime("%d"))]
p18obsdate = "%s-%s-%s" % (y, m, d)
#2. Get the data from the link
page = requests.get('http://nera.palomar.caltech.edu/P18_seeing/seeing_log_%s.log' % p18obsdate)
data = page.content
#3. Split the page by newlines
data = data.split('\n')
#4. Loop through the data and only use points that have 4 or more seeing values to average
for i in data:
i = i.split()
if len(i) > 5 and int(i[5]) > 4:
d ='%s %s' %(i[1], i[0])
p18date.append(datetime.datetime.strptime(d, "%m/%d/%Y %H:%M:%S")
+ datetime.timedelta(hours=8))
p18seeing.append(float(i[4]))
return p18date, p18seeing
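# Illustrative usage sketch (added for clarity, the date is hypothetical):
#   get_p18obsdata("2018-06-15") fetches the P18 seeing log for the previous night
# and returns two parallel lists: datetimes (shifted by +8 h) and average seeing values.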
def get_allocations_user(user_id):
res = db.execute_sql(""" SELECT a.id, a.designator, p.designator, g.designator, a.time_allocated, a.time_spent
FROM allocation a, program p, groups g, usergroups ug
WHERE a.program_id = p.id AND p.group_id = g.id
AND g.id = ug.group_id AND a.active is True AND ug.user_id = %d"""%(user_id))
# create the dataframe and set the allocation names to be linked
data = DataFrame(res, columns=['id', 'allocation', 'program', 'group', 'time allocated', 'time spent'])
return data
def get_allocations():
allocations = db.execute_sql("""SELECT a.id, a.designator, p.designator, p.name, a.inidate, a.enddate, a.time_allocated, a.time_spent, a.active
FROM allocation a, program p
WHERE a.program_id=p.id
ORDER BY a.id DESC; """)
allocations = jsonify(allocations, names=["id", "name", "program", "description", "inidate", "enddate", "time_allocated", "time_spent", "active"])
return allocations
def get_programs():
programs = db.execute_sql("""SELECT p.id, p.designator, p.name, g.designator, p.pi, p.priority
FROM program p, groups g
WHERE p.group_id = g.id
ORDER BY p.id DESC; """)
programs = jsonify(programs, names=["id", "designator", "name", "group", "pi", "priority"])
return programs
def get_all_programs():
programs = db.get_from_program(["id", "designator"])
programs = jsonify(programs, names=["id", "name"])
return programs
def get_all_groups():
groups = db.execute_sql("SELECT id, designator FROM groups;")
groups = jsonify(groups, names=["id", "designator"])
return groups
def delete_allocation(id):
alloc = db.execute_sql("SELECT * FROM allocation where id=%d;"%id)
if len(alloc) > 0:
db.execute_sql("DELETE FROM allocation where id=%d"%id)
status = 0
message = "Delected allocation with ID %d"%id
else:
status = -1
message = "No allocation found to delete with ID %d"%id
return (status, message)
def delete_program(id):
prog = db.execute_sql("SELECT * FROM program where id=%d;"%id)
if len(prog) > 0:
db.execute_sql("DELETE FROM program where id=%d"%id)
status = 0
message = "Delected program with ID %d"%id
else:
status = -1
message = "No program found to delete with ID %d"%id
return (status, message)
def delete_group(id):
group = db.execute_sql("SELECT * FROM groups where id=%d;"%id)
if len(group) > 0:
db.execute_sql("DELETE FROM groups where id=%d"%id)
status = 0
message = "Delected group with ID %d"%id
else:
status = -1
message = "No group found to delete with ID %d"%id
return (status, message)
def get_allocation_stats(user_id, inidate=None, enddate=None):
"""
Obtains a list of allocations that belong to the user and
query the total allocated name and time spent for that allocation.
If no user_id is provided, all active allocations are returned.
"""
if (user_id is None):
res = db.get_from_allocation(["designator", "time_allocated", "time_spent"], {"active":True})
df = DataFrame(res, columns=["designator", "time_allocated", "time_spent"])
alloc_hours = np.array([ta.total_seconds() / 3600. for ta in df["time_allocated"]])
spent_hours = np.array([ts.total_seconds() / 3600. for ts in df["time_spent"]])
free_hours = alloc_hours - spent_hours
df = df.assign(alloc_hours=alloc_hours, spent_hours=spent_hours, free_hours=free_hours)
else:
if (inidate is None or enddate is None):
res = db.execute_sql(""" SELECT a.designator, a.time_allocated, a.time_spent
FROM allocation a, program p, groups g, usergroups ug
WHERE a.program_id = p.id AND p.group_id = g.id
AND g.id = ug.group_id AND a.active is True AND ug.user_id = %d"""%(user_id))
df = DataFrame(res, columns=["designator", "time_allocated", "time_spent"])
alloc_hours = np.array([ta.total_seconds() / 3600. for ta in df["time_allocated"]])
spent_hours = np.array([ts.total_seconds() / 3600. for ts in df["time_spent"]])
free_hours = alloc_hours - spent_hours
df = df.assign(alloc_hours=alloc_hours, spent_hours=spent_hours, free_hours=free_hours)
else:
res = db.execute_sql(""" SELECT DISTINCT a.id, a.designator, a.time_allocated
FROM allocation a, program p, groups g, usergroups ug
WHERE a.program_id = p.id AND p.group_id = g.id
AND g.id = ug.group_id AND a.active is True AND ug.user_id = %d;"""%(user_id))
allocdes = []
spent_hours = []
alloc = []
for ais in res:
spent = db.get_allocation_spent_time(ais[0], inidate, enddate)
allocdes.append(ais[1])
spent_hours.append(int(spent)/3600.)
alloc.append(ais[2])
res = np.array([allocdes, alloc, spent_hours])
df = DataFrame(res.T, columns=["designator", "time_allocated", "time_spent"])
alloc_hours = np.array([ta.total_seconds() / 3600. for ta in df["time_allocated"]])
free_hours = alloc_hours - spent_hours
df = df.assign(alloc_hours=alloc_hours, spent_hours=spent_hours, free_hours=free_hours)
df = df.sort_values(by=["alloc_hours"], ascending=False)
alloc_names = df["designator"].values
category = ["alloc_hours", "spent_hours", "free_hours"]
data = {'allocations' : alloc_names}
for cat in category:
data[cat] = df.fillna(0)[cat]
return data
if __name__ == "__main__":
#get_request_by_id(20180416223941629)
pass
| scizen9/kpy | flask/model.py | Python | gpl-2.0 | 25,935 |
from verbs.baseforms import forms
class CoverForm(forms.VerbForm):
name = "Cover"
slug = "cover"
add_conditional_statement = forms.CharField()
edit_what_remark = forms.CharField()
comment_why = forms.CharField()
duration_min_time = forms.IntegerField() | Bionetbook/bionetbook | bnbapp/bionetbook/_old/verbs/forms/cover.py | Python | mit | 281 |
#!/usr/bin/python
import re
import ipaddress as ip
import argparse
import config as cfg
import sys
#Adds libs dir to sys.path
sys.path.insert( 0, cfg.libdir )
#import expect module
import expect
#Dictionary to store all url objects
urls = {}
#Dictionary to store all address objects
ipobjectsbyip = {}
#Dictionary to store addresses that were not imported because they are duplicated
duplicatedaddressbyname = {}
#Dictionary to store all elements already added to any checkpoint script
#its purpose is to avoid duplicated objects
addedelements = {}
"""
TODO:
Minimal changes:
Move update_all to the writecpscript function
Important changes:
Connect to the management and get: objects, applications, users... to adapt to the config and avoid duplicities
"""
#Set the variables from config file
SMSip=cfg.SMSip
SMSuser=cfg.SMSuser
SMSpwd=cfg.SMSpwd
path = cfg.path
dir_csv = cfg.dir_csv
dir_cp = cfg.dir_cp
logfile = cfg.logfile
avoidduplicatedobjects = cfg.avoidduplicatedobjects
groupby = cfg.groupby
rulenumber = cfg.rulenamenumber
rulenamenumber = cfg.rulenamenumber
policyname = cfg.policyname
parser = argparse.ArgumentParser()
parser.add_argument( "-p", "--policyname", dest="policyname", default="Standard", help="The name of the Checkpoint policy in which to import the rules", type=str )
parser.add_argument( "-g", "--groupby", dest="groupby", default="raw", help="Which way to use for grouping rules [source|destination|service|raw|auto]", type=str )
parser.add_argument( "-n", "--rulenumber", dest="rulenumber", default=0, help="The number of the las rule in the rulebase, the new rules will follow it", type=int )
parser.add_argument( "-rn", "--rulenamenumber", dest="rulenamenumber", default=0, help="The start number that will be concatenate to the rulename, when groupby is not raw", type=int )
args = vars( parser.parse_args() )
def objectadded( name, objtype ):
"""
Avoid duplicity of elements
"""
objtype = objtype.lower()
if objtype not in addedelements:
addedelements[ objtype ] = []
if name.lower() in addedelements[ objtype ]:
print "Duplicated " + objtype.lower() + ": " + name
return True
else:
addedelements[ objtype ].append( name.lower() )
return False
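# Illustrative usage sketch (added for clarity, values are hypothetical): the first
# call objectadded("10.0.0.1", "address") returns False and records the element;
# a second identical call prints "Duplicated address: 10.0.0.1" and returns True.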
def getcsvlines( fname ):
f = open( path + "/" + dir_csv + "/" + fname, "r" )
lines = []
for line in f.readlines():
lines.append( line.replace("\n", "") )
f.close()
return lines[1::]
def writecpscript( commands, filename ):
f = open(path+"/"+dir_cp+"/"+filename, "w")
for line in commands:
f.write( unicode(line) + "\n" )
f.close()
def swapname ( obj, objtype ):
with open( path + "/cpservices.txt", "r" ) as f:
cpservices = f.read().split( "\n" )
upperservices = map( str.upper, cpservices )
obj = obj.replace("/","_")
replaced = obj
if objtype == "service":
if obj.find("-ALL") >= 0:
replaced = obj.replace("-ALL", "-FULL")
elif obj == "netbios-ssn":
replaced = "NBT"
elif obj.upper() in upperservices:
return False
elif objtype == "object":
if obj.find("interface") >= 0:
replaced = obj.replace("interface","int")
elif obj.find("INTERFACE") >= 0 :
replaced = obj.replace("INTERFACE","INT")
if replaced != obj:
print "INFORMATIONAL: Object name swapped: " + obj + " => " + replaced
return replaced
return obj
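# Illustrative usage sketch (added for clarity, values are hypothetical):
#   swapname("outside/interface", "object") -> "outside_int"   (slash replaced, "interface" shortened)
#   swapname("http", "service") -> False, assuming "http" is already listed in cpservices.txt,
# so the caller can skip re-creating a built-in Checkpoint service.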
def importobjects():
print "Importing network objects"
lines=[]
objgroups=[]
objects = getcsvlines( "objects.csv" )
for line in objects:
fields = line.split( "," )
objtype = fields[0]
name = swapname( fields[1], "object" )
addr = fields[2]
if avoidduplicatedobjects == True:
            #Don't import objects with the same name or IP
            #as ones already imported
if not addr in ipobjectsbyip:
#store address and name of first imported objects
#used to substitute name of duplicated objects
ipobjectsbyip[addr] = name
else:
#store the name and address of the duplicated objects
#which are not imported
duplicatedaddressbyname[ name ] = addr
#check if object is already imported
if objectadded ( addr, "address" ) or objectadded ( name, "object" ):
continue
description = fields[4]
if objtype == "ip":
netmask = fields[3]
if netmask == "255.255.255.255":
lines.append("create host_plain " + name )
else:
lines.append("create network " + name)
lines.append("modify network_objects " + name + " netmask " + netmask)
lines.append("modify network_objects " + name + " ipaddr " + addr)
lines.append("modify network_objects " + name + " comments \"" + description + "\"" )
elif objtype == "group":
objgroups.append( "create network_object_group " + name )
for obj in addr.split( " " ):
obj = obj.replace("/","_")
obj = obj.replace("interface","int")
if obj in urls:
obj = urls[ obj ]
if re.match( "^[0-9]", obj ):
obj = "unicast-"+obj
objgroups.append( "addelement network_objects " + name + " ''" + " network_objects:" + obj )
elif objtype == "url":
addr = "." + addr
urls[ name ] = addr
lines.append ( "create domain " + addr )
lines.append ( "modify network_objects " + addr + " comments " + description )
lines.append ( "update_all" )
objgroups.append ( "update_all" )
writecpscript( lines + objgroups, "objects.cp.txt" )
print " Network objects added: " + str( len( objects ) )
def importpools():
print "Importing ip pools"
lines=[]
objects = getcsvlines( "pools.csv" )
print "POOLS import not yet implemented!!!!"
print " Add this pools manaully!!! "
print "------------------------------------"
print "\n".join( objects )
print "------------------------------------"
return
for line in objects:
fields = line.split( "," )
name = fields[3]
if objectadded ( name, "pool" ):
continue
firstaddr = fields[4]
lastaddr = fields[5]
lines.append( "create address_range " + name )
lines.append( "modify network_objects " + name + " ipaddr_first " + firstaddr )
lines.append( "modify network_objects " + name + " ipaddr_last " + lastaddr )
lines.append("update_all")
writecpscript( lines, "pools.cp.txt" )
print " Pools added: " + str( len( objects ) )
def importusers():
print "Importing users"
lines = []
passwords = []
objects = getcsvlines( "users.csv" )
for line in objects:
fields = line.split( "," )
name = fields[0]
if objectadded ( name, "user" ):
continue
pwd = fields[1]
lines.append( "create user " + name )
lines.append( "modify users " + name + " auth_method 'Internal Password'" )
passwords.append( "set_pass " + name + " '" + pwd + "'" )
lines.append("update_all")
lines = lines + passwords
lines.append("update_all")
writecpscript( lines, "users.cp.txt" )
print " Users added: " + str( len( objects ) )
def importservices():
print "Importing services"
lines = []
passwords = []
objects = getcsvlines( "applications.csv" )
for line in objects:
name,proto,src,dst = line.split( "," )
name = swapname( name, "service" )
if name == False or objectadded ( name, "service" ): #or name in cpservices:
continue
if src != "0-65535" and src != "1-65535":
print " WARNING: source service not implemented yet"
print " Add this service manually or implement me ;)"
print " Service details: " + line
continue
if proto == "tcp":
srvccmd = "create tcp_service"
elif proto == "udp":
srvccmd = "create udp_service"
else:
print " WARNING: Other protocol than tcp or udp, not implemented yet!"
print " Add this service manually or implement me ;)"
print " Service details: " + line
#srvccmd = "create other_service"
continue
lines.append( srvccmd + " " + name )
lines.append( "modify services " + name + " port " + dst )
lines.append("update_all")
writecpscript( lines, "services.cp.txt" )
print " Services added: " + str( len( objects ) )
def importservicegroups():
print "Importing service groups"
objects = getcsvlines( "application-groups.csv" )
lines = []
for line in objects:
name,srvcs = line.split( "," )
name = swapname( name, "service_group" )
if objectadded ( name, "service_group" ):
continue
lines.append ( "create service_group " + name )
for srvc in srvcs.split( " " ):
lines.append ( "addelement services " + name + " '' services:" + srvc)
lines.append("update_all")
writecpscript( lines, "service-groups.cp.txt" )
print " Service groups added: " + str( len( objects ) )
def atomizerules ( rules ):
"""
Expand all rules so una line is for one source, one destination
and one service.
"""
atomized = []
for rule in rules:
name,action,log,source,destination,application,disabled = rule.split( "," )
for src in source.split(" "):
for dst in destination.split(" "):
for app in application.split(" "):
atomized.append( name + "," + action + "," + log + "," + src + "," + dst + "," + app + "," + disabled )
return atomized
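# Illustrative sketch (added for clarity, the rule is hypothetical): a CSV rule
#   "r1,accept,Log,hostA hostB,net1,http https,false"
# is expanded into 2 x 1 x 2 = 4 lines, each with a single source, destination and service.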
def groupbydestination( rules ):
retrules = {}
for line in rules:
#Loop each rule to find other ones to group with
name,action,log,source,destination,application,disabled = line.split( "," )
#Group rules by destination
key= ",".join([ destination, action, disabled, application ])
if not key in retrules:
retrules[ key ] = {}
retrules[ key ][ "sources" ] = []
if not "log" in retrules[ key ] or ( "log" in retrules[ key ] and log == "Log" ):
retrules[ key ][ "log" ] = log
if not source in retrules[ key ][ "sources" ]:
retrules[ key ][ "sources" ].append( source )
return retrules
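# Illustrative note (added for clarity, values are hypothetical): rules are keyed by
# "destination,action,disabled,application", e.g. the key "net1,accept,false,http"
# collects every distinct source that matches that destination/action/service tuple.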
def groupbysource( rules ):
retrules = {}
for line in rules:
#Loop each rule to find other ones to group with
name,action,log,source,destination,application,disabled = line.split( "," )
#Group rules by source
key= ",".join([ source, action, disabled, application ])
if not key in retrules:
retrules[ key ] = {}
retrules[ key ][ "destinations" ] = []
if not "log" in retrules[ key ] or ( "log" in retrules[ key ] and log == "Log" ):
retrules[ key ][ "log" ] = log
if not destination in retrules[ key ][ "destinations" ]:
retrules[ key ][ "destinations" ].append( destination )
return retrules
def groupbyservice( rules ):
retrules = {}
for line in rules:
#Loop each rule to find other ones to group with
name,action,log,source,destination,application,disabled = line.split( "," )
#Group rules by services
key= ",".join([ application, action, disabled ])
if not key in retrules:
retrules[ key ] = {}
retrules[ key ][ "destinations" ] = []
retrules[ key ][ "sources" ] = []
if not "log" in retrules[ key ] or ( "log" in retrules[ key ] and log == "Log" ):
retrules[ key ][ "log" ] = log
if not destination in retrules[ key ][ "destinations" ]:
retrules[ key ][ "destinations" ].append( destination )
if not source in retrules[ key ][ "sources" ]:
retrules[ key ][ "sources" ].append( source )
return retrules
def optimizerules ( rules ):
"""
    Group rules that share the same source, destination, or action
Three variants:
1 "samedest": Same destination, service, action and status ( enabled or disabled)
2 "samesrc" : Same source, service, action and status ( enabled or disabled)
3 "samesrv" : Same service, action and status ( enabled or disabled)
    Then count which variant has the fewest rules and apply that to group the final rules
"""
totalrules = len( rules )
rules = atomizerules ( rules )
grouped = {} # store grouped rules
grouped [ "samedest" ] = groupbydestination( rules )
grouped [ "samesrc" ] = groupbysource ( rules )
grouped [ "samesrv" ] = groupbyservice ( rules )
groupby = args[ "groupby" ]
if groupby == "source":
lessrules = "samesrc"
elif groupby == "destination":
lessrules = "samedest"
elif groupby == "service":
lessrules = "samesrv"
elif groupby == "auto":
        counts = { "samedest": len( grouped[ "samedest" ] ), "samesrc": len( grouped[ "samesrc" ] ), "samesrv": len( grouped[ "samesrv" ] ) }
        #pick the grouping variant that produces the fewest rules
        lessrules = min( counts, key=counts.get )
retrules = []
i=rulenamenumber
for key, value in grouped[ lessrules ].items():
#Build csv format again
log = value[ "log" ]
if lessrules == "samedest":
destination, action, disabled, application = key.split(",")
source = " ".join( value["sources"] )
name = destination
elif lessrules == "samesrc":
source, action, disabled, application = key.split(",")
source = " ".join( value["destinations"] )
name = source
elif lessrules == "samesrv":
application, action, disabled = key.split(",")
source = " ".join( value["sources"] )
destination = " ".join( value["destinations"] )
name = application
retrules.append( ",".join( [ name,action,log,source,destination,application,disabled ] ) )
i+=1
return retrules
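# Illustrative note (added for clarity): with groupby='destination' the atomized rules
# sharing the same destination, service, action and status are merged into one rule whose
# source column is the space-separated union of the original sources (see groupbydestination above).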
def unhiderules ( rules ):
    #Ensure that no rule hides another one
    #Not implemented yet (the call below is commented out); return the rules unchanged as a placeholder.
    return rules
def importpolicies():
print "Importing access policies"
lines=[]
i=args[ "rulenumber" ]
objects = getcsvlines( "policies.csv" )
if args[ "groupby" ] != "raw":
objects = optimizerules ( objects )
f = open("rules-optimized.csv", "w")
for line in objects:
f.write( line + "\n" )
f.close()
#objects = unhiderules ( objects )
for line in objects:
name,action,log,source,destination,application,disabled = line.split( "," )
if objectadded ( "From: " + source + " To: " + destination + " Service: " + application + " Action: " + action + " Disable: " + disabled, "policy" ):
continue
polname = args[ "policyname" ]
lines.append( "addelement fw_policies ##" + polname + " rule security_rule" )
lines.append( "modify fw_policies ##" + polname + " rule:" + str(i) + ":name " + name )
lines.append( "modify fw_policies ##" + polname + " rule:" + str(i) + ":disabled " + disabled )
lines.append( "rmbyindex fw_policies ##" + polname + " rule:" + str(i) + ":track 0" )
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":track tracks:" + log )
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":time globals:Any" )
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":install:'' globals:Any" )
#lines.append( "rmbyindex fw_policies ##" + polname + " rule:" + str(i) + ":action 0" )
if action == "accept":
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":action accept_action:" + action )
elif action == "deny":
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":action drop_action:drop" )
for src in source.split( " " ):
if src in urls:
src = urls[ src ]
if avoidduplicatedobjects == True:
#Looks if src is duplicated and, if is it,
#is substituted with the first imported one
if src in duplicatedaddressbyname:
print "Changed source name %s for %s because it is the same IP" % ( src, ipobjectsbyip[duplicatedaddressbyname[ src ]] )
src = ipobjectsbyip[duplicatedaddressbyname[ src ]]
if src.lower() == "any":
src = "globals:Any"
else:
src = "network_objects:" + swapname( src, "object" )
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":src:'' " + src )
lines.append( "modify fw_policies ##" + polname + " rule:" + str(i) + ":src:op ''" )
for dst in destination.split( " " ):
if dst in urls:
print "url for dst: " + dst
dst = urls[ dst ]
print "url dst changed to: " + dst
if avoidduplicatedobjects == True:
#Looks if dst is duplicated and, if is it,
#is substituted with the first imported one
if dst in duplicatedaddressbyname:
print "Changed destination name %s for %s because it is the same IP" % ( dst, ipobjectsbyip[duplicatedaddressbyname[ dst ]] )
dst = ipobjectsbyip[duplicatedaddressbyname[ dst ]]
if dst == "any":
dst = "globals:Any"
else:
dst = "network_objects:" + swapname( dst, "object" )
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":dst:'' " + dst )
lines.append( "modify fw_policies ##" + polname + " rule:" + str(i) + ":dst:op ''" )
for app in application.split( " " ):
if app == "any":
app = "globals:Any"
else:
origapp = app
app = swapname( app, "service" )
if app == False:
app = origapp
app = "services:" + app
lines.append( "addelement fw_policies ##" + polname + " rule:" + str(i) + ":services:'' " + app )
i += 1
lines.append("update_all")
writecpscript( lines, "policies.cp.txt" )
print " Policies added: " + str( len( objects ) )
def importnat():
print "NAT import not yet implemented!!!!"
importobjects()
#importpools()
#importusers()
importservices()
#importservicegroups()
#importnat()
importpolicies()
#dbedit order: objects, services, service-groups, pools, policies, nat
| abdulet/fwbuilder | modules/csv2checkpoint/csv2checkpoint-dbedit.py | Python | gpl-3.0 | 19,025 |
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import models
class CustomUserNonUniqueUsername(AbstractBaseUser):
"""
A user with a non-unique username.
This model is not invalid if it is used with a custom authentication
backend which supports non-unique usernames.
"""
username = models.CharField(max_length=30)
email = models.EmailField(blank=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
| yephper/django | tests/auth_tests/models/invalid_models.py | Python | bsd-3-clause | 638 |
# -*- coding: utf-8 -*-
from os import environ
from subprocess import PIPE, Popen
from sys import exit
args = [environ["CORGI"], "match", "[ \\t\\n\\r]", "\n"]
proc = Popen(args, stdout=PIPE)
stdout = proc.stdout.read().decode("UTF-8")
proc.wait()
if stdout != "\n":
exit(1)
exit(0)
# vim: tabstop=4 shiftwidth=4 expandtab softtabstop=4
| SumiTomohiko/corgi | tests/test_charset080.py | Python | mit | 344 |
from . import PqsoDerivative
from pyRSD.rsd.tools import k_AP, mu_AP
class dPqso_db1(PqsoDerivative):
"""
The partial derivative of :func:`QuasarSpectrum.power` with respect to
``b1``
"""
param = 'b1'
@staticmethod
def eval(m, pars, k, mu):
kprime = k_AP(k, mu, m.alpha_perp, m.alpha_par)
muprime = mu_AP(mu, m.alpha_perp, m.alpha_par)
# derivative of scale-dependent bias term
dbtot_db1 = 1 + 2*m.f_nl*m.delta_crit/m.alpha_png(kprime)
# finger-of-god
G = m.FOG(kprime, muprime, m.sigma_fog)
# the final derivative
btot = m.btot(kprime)
rescaling = (m.alpha_drag**3) / (m.alpha_perp**2 * m.alpha_par)
return rescaling * G**2 * (2*m.P_mu0(kprime) + muprime**2 * m.P_mu2(kprime)) * dbtot_db1 / btot
| nickhand/pyRSD | pyRSD/rsd/power/qso/derivatives/b1.py | Python | gpl-3.0 | 813 |
"""Supplementary tools for the `iris` EOF analysis interface."""
# (c) Copyright 2013-2016 Andrew Dawson. All Rights Reserved.
#
# This file is part of eofs.
#
# eofs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# eofs is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with eofs. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function) # noqa
from copy import copy
from functools import reduce
import warnings
import numpy as np
from iris.cube import Cube
from iris.analysis.cartography import area_weights
from iris.analysis.cartography import cosine_latitude_weights
from iris.exceptions import CoordinateMultiDimError, CoordinateNotFoundError
from . import standard
from .generic import covcor_dimensions
def weights_array(cube, scheme):
"""Weights for a data set on a grid.
Returned weights are a `numpy.ndarray` broadcastable to the shape of
the input cube.
**Arguments:**
*cube*
An `~iris.cube.Cube` instance to generate weights for.
*scheme*
Weighting scheme to use. The following values are accepted:
* *'coslat'* : Square-root of cosine of latitude.
* *'area'* : Square-root of grid cell area normalized by total
grid area.
**Returns:**
*weights*
        An array containing the weights (not a `~iris.cube.Cube`).
**Examples:**
Area weights for a `~iris.cube.Cube` on 2D grid:
weights = weights_array(cube, scheme='area')
Square-root of cosine of latitude weights for a `~iris.cube.Cube`
with a latitude dimension:
weights = weights_array(cube, scheme='coslat')
"""
# Always use lower-case for the scheme, allowing the user to use
# upper-case in their calling code without an error.
scheme = scheme.lower()
if scheme in ('area',):
# Handle area weighting.
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
weights = np.sqrt(area_weights(cube, normalize=True))
except (ValueError, CoordinateMultiDimError):
raise ValueError('cannot generate area weights')
elif scheme in ('coslat',):
# Handle square-root of cosine of latitude weighting.
try:
weights = np.sqrt(cosine_latitude_weights(cube))
except (ValueError, CoordinateMultiDimError):
raise ValueError('cannot generate latitude weights')
else:
raise ValueError("invalid weighting scheme: '{!s}'".format(scheme))
return weights
def coord_and_dim(cube, coord, multiple=False):
"""
Retrieve a coordinate dimension and its corresponding position from
a `~iris.cube.Cube` instance.
**Arguments:**
*cube*
An `~iris.cube.Cube` instance to retrieve the dimension from.
*coord*
Name of the coordinate dimension to retrieve.
**Returns:**
*coord_tuple*
A 2-tuple of (coordinate_dimension, dimension_number).
.. deprecated:: version 1.2
The function `get_time_coord` is used instead for finding time
coordinates. For other coordinates please use the iris built-in
        functionality for locating required coordinates.
"""
deprecation_message = ('coord_and_dim() is deprecated, please use '
'get_time_coord() or built-in iris functionality')
warnings.warn(deprecation_message, DeprecationWarning)
coords = [c for c in cube.dim_coords if coord in c.name()]
if len(coords) > 1:
raise ValueError('multiple {} coordinates are not '
'allowed'.format(coord))
try:
c = coords[0]
except IndexError:
raise ValueError('cannot get {!s} coordinate from '
'cube {!r}'.format(coord, cube))
c_dim = cube.coord_dims(c)
c_dim = c_dim[0] if c_dim else None
return c, c_dim
def get_time_coord(cube):
"""
Retrieve the time coordinate dimension and its corresponding
position from a `~iris.cube.Cube` instance.
**Arguments:**
*cube*
An `~iris.cube.Cube` instance to retrieve the dimension from.
**Returns:**
*coord_tuple*
A 2-tuple of (coordinate_dimension, dimension_number).
"""
time_coords = cube.coords(axis='T', dim_coords=True)
if not time_coords:
# If no coordinates were identified as time, relax the criteria and
# look for dimension coordinates with 'time' in the name:
time_coords = [coord for coord in cube.dim_coords
if 'time' in coord.name()]
if not time_coords:
raise ValueError('cannot find a time dimension coordinate')
if len(time_coords) > 1:
raise ValueError('multiple time coordinates are not allowed')
time_coord = time_coords[0]
time_dim = cube.coord_dims(time_coord)[0]
return time_coord, time_dim
def classified_aux_coords(cube):
"""
Classify a Cube's auxiliary coordinates into those that span only
the time dimension, those that span only space dimensions, and those
that span both time and space dimensions.
**Arguments:**
*cube*
An `~iris.cube.Cube` instance whose auxiliary coordinates should
be classified.
**Returns:**
*coord_sets*
A 3-tuple of lists of coordinates. The first element is the list
        of all auxiliary coordinates spanning only the time dimension,
        the second element is the list of all auxiliary coordinates
        spanning only space dimensions, and the third element is the
        list of all auxiliary coordinates spanning both time and space
dimensions.
"""
try:
_, timedim = get_time_coord(cube)
except ValueError:
timedim = None
time_only = []
space_only = []
time_and_space = []
for coord in cube.aux_coords:
dims = cube.coord_dims(coord)
if dims == (timedim,):
time_only.append((copy(coord), timedim))
elif dims:
if timedim in dims:
time_and_space.append((copy(coord), dims))
else:
space_only.append((copy(coord), dims))
return time_only, space_only, time_and_space
def common_items(item_set):
"""
Given an iterable of lists, constructs a list of every item that is
present in all of the lists.
**Arguments:**
*item_set*
An iterable containing lists of items.
**Returns:**
*common*
A list of the items which occur in all sublists in the input.
"""
common = []
for item in reduce(lambda x, y: x + y, item_set):
item_ok = all([item in items for items in item_set])
if item_ok and item not in common:
common.append(item)
return common
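# Illustrative usage sketch (added for clarity, values are hypothetical):
#   common_items([[1, 2, 3], [2, 3, 4]]) -> [2, 3]
# i.e. only the items present in every sub-list, in first-seen order.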
def _time_coord_info(cube):
time, time_dim = get_time_coord(cube)
coords = [copy(coord) for coord in cube.dim_coords]
coords.remove(time)
coords = [copy(time)] + coords
return time_dim, coords
def _map_and_dims(pcs, field, mapfunc, *args, **kwargs):
"""
Compute a set of covariance/correlation maps and the resulting
dimensions.
"""
info = {}
for cube in (field, pcs):
info[cube.name()] = _time_coord_info(cube)
cmap_args = [np.rollaxis(cube.data, info[cube.name()][0])
for cube in (pcs, field)]
cmap_args += args
dim_args = [info[cube.name()][1] for cube in (pcs, field)]
cmap = mapfunc(*cmap_args, **kwargs)
dims = covcor_dimensions(*dim_args)
return cmap, dims
def correlation_map(pcs, field):
"""Correlation between PCs and a field.
Computes maps of the correlation between each PC and the given
field at each grid point.
Given a set of PCs contained in a `~iris.cube.Cube` (e.g., as output
from `eofs.iris.Eof.pcs`) and a field with a time dimension
contained in a `iris.cube.Cube`, one correlation map per PC is
computed.
The field must have the same length time dimension as the PCs. Any
number of spatial dimensions (including zero) are allowed in the
field and there can be any number of PCs.
**Arguments:**
*pcs*
PCs contained in a `~iris.cube.Cube`.
*field*
        Spatio-temporal field contained in a `~iris.cube.Cube`.
**Returns:**
*correlation_maps*
A `~iris.cube.Cube` containing the correlation maps.
**Examples:**
Assuming *solver* is an instance of `eofs.iris.Eof`, compute
correlation maps for each PC::
pcs = solver.pcs(pcscaling=1)
correlation_maps = correlation_map(pcs, field)
"""
# Compute the correlation map and retrieve appropriate Iris coordinate
# dimensions for it.
cor, dims = _map_and_dims(pcs, field, standard.correlation_map)
if not dims:
# There are no output dimensions, return a scalar.
return cor
# Otherwise return an Iris cube.
cor = Cube(cor, dim_coords_and_dims=list(zip(dims, range(cor.ndim))))
cor.long_name = 'pc_correlation'
return cor
def covariance_map(pcs, field, ddof=1):
"""Covariance between PCs and a field.
Computes maps of the covariance between each PC and the given
field at each grid point.
Given a set of PCs contained in a `~iris.cube.Cube` (e.g., as output
from `eofs.iris.Eof.pcs`) and a field with a time dimension
contained in a `iris.cube.Cube`, one covariance map per PC is
computed.
The field must have the same length time dimension as the PCs. Any
number of spatial dimensions (including zero) are allowed in the
field and there can be any number of PCs.
**Arguments:**
*pcs*
PCs contained in a `~iris.cube.Cube`.
*field*
        Spatio-temporal field contained in a `~iris.cube.Cube`.
**Keyword arguments:**
*ddof*
'Delta degrees of freedom'. The divisor used to normalize
the covariance matrix is *N - ddof* where *N* is the
number of samples. Defaults to *1*.
**Returns:**
*covariance_maps*
A `~iris.cube.Cube` containing the covariance maps.
**Examples:**
Compute covariance maps for each PC::
pcs = solver.pcs(pcscaling=1)
covariance_maps = covariance_map(pcs, field)
"""
# Compute the covariance map and retrieve appropriate Iris coordinate
# dimensions for it.
cov, dims = _map_and_dims(pcs, field, standard.covariance_map, ddof=ddof)
if not dims:
# There are no output dimensions, return a scalar.
return cov
# Otherwise return an Iris cube.
cov = Cube(cov, dim_coords_and_dims=list(zip(dims, range(cov.ndim))))
cov.long_name = 'pc_covariance'
return cov
| ajdawson/eofs | lib/eofs/tools/iris.py | Python | gpl-3.0 | 11,172 |
#!/usr/bin/env python
import cv2
import numpy as np
import multiprocessing
from time import sleep, time
from .tools import PiCameraOperations
from ..ktrafficlight import TrafficLight
from ..pathsandnames import PathsAndNames
from .picamera_simulation import PiCameraSimulation
class Capturador(multiprocessing.Process):
"""
:param:
:return: I / O
5MP
width: int: 2592
height: int: 1944
8MP
width: int: 3280
height: int: 2464
"""
def __init__(self, video_source=0,
width=2592,
height=1944,
simulation=False,
pipe=None,
periodoSemaforo = 0):
super(Capturador, self).__init__()
self.simulation = simulation
# Initial parameters
self.video_source = video_source
self.width = width # Integer Like
self.height = height # Integer Like
        # Variable to mark the Stand-by signal packet
self.last_order = 0
# Set the input pipe for Stand-by signal
self.out_pipe = pipe
# Set Queue for send image outputs
#self.queue = PathsAndNames.nombresDeCapturas
self.in_pipe = PathsAndNames.in_pipe
# Instantiate Semaforo
self.semaforo = TrafficLight(periodoSemaforo = periodoSemaforo,visualizacionDebug = False)
        PathsAndNames.miReporte.info('SUCCESSFULLY CREATED THE Capturador CLASS!!!')
def stop(self):
pass
def run(self):
"""
Main Loop / parallel process for write images onto WorkDir/
:input: standby: Bool (via pipe)
:return: Void : Write in SD images if standby is False
:modes: Simulation and Real Worldwork
"""
# Load ROI Parameters in PiCamera format( 0 to 1) for P1 and P2
p0x, p0y, p1x, p1y = PiCameraOperations.ROI(w=self.width, h=self.height)
scale_factor_in_X, scale_factor_in_Y = PiCameraOperations._scale()
#scale_factor_in_X_LOW, scale_factor_in_Y_LOW = PiCameraOperations._scaleLow()
if self.simulation:
camera = PiCameraSimulation()
PathsAndNames.miReporte.info('Started in emulated camera mode')
else:
import picamera
from .picameraArray import PiRGBAArray
camera = picamera.PiCamera( resolution=(self.width, self.height),
framerate=2)
PathsAndNames.miReporte.info('Started in real Pi camera mode')
camera.zoom = (p0x, p0y, p1x, p1y)
camera.exposure_mode = 'sports'
# self.camera.shutter_speed = 190000
# self.camera.iso = 800
if not self.simulation:
lowResCap = PiRGBAArray(camera, size=(scale_factor_in_X,scale_factor_in_Y))
# Set low resolution stream as continuous
lowResStream = camera.capture_continuous(lowResCap,
format="bgra",
use_video_port=True,
splitter_port=2,
resize=(scale_factor_in_X,scale_factor_in_Y))
while True:
"""
            Keep saving the PiCamera HD and Semaforo LR frames to disk and
            send the analysis of the semaforo pixels to the main program via the queue.
"""
# Keep track for standby input
if self.out_pipe.poll():
self.last_order = self.out_pipe.recv()
# Check if standby or not
if self.last_order > 0:
if not self.simulation:
lrs = lowResStream.__next__()
lowRes_array = lrs.array
# Truncate low res frame
lowResCap.truncate(0)
# Obtain pixeles for LowRes frame
pixeles = PathsAndNames.traffic_ligth_pixeles(lowRes_array)
# Get traffic light and get the color as integer
colourFound,flanco = self.semaforo.estadoSemaforo(pixeles)
#semaforo_array = np.reshape(pixeles, (24, 8, 3))
color_asinteger = colourFound%4
else:
pixeles = np.zeros((192,8), dtype=int)
# Get traffic light and get the color as integer
colourFound,flanco = self.semaforo.estadoSemaforo(pixeles)
#semaforo_array = np.reshape(pixeles, (24, 8, 3))
color_asinteger = colourFound%4
# Obtain Actual Datetime stamp for this iteration of while loop
actual_datestamp_array = time()#, actual_datestamp_string = PiCameraOperations.get_actual_time()
nombreDeArchivo = PathsAndNames.convertirANombreArchivo(actual_datestamp_array)
                # If the colour is the right one it is sent through the pipe and saved to disk; otherwise the null vector is sent through the pipe to keep track of the edge and the colour
if (colourFound>0) or (self.last_order == 2):
self.in_pipe.send((actual_datestamp_array, color_asinteger,flanco))
if self.simulation is False:
cv2.imwrite(PathsAndNames.directorioDeCaptura+'/'+nombreDeArchivo+'_{}.jpg'.format(color_asinteger), lowRes_array)
else:
camera.capture_sequence(dst_name=PathsAndNames.directorioDeCaptura+'/'+nombreDeArchivo+'_{}.jpg'.format(color_asinteger),resolution=(320,240))
else:
self.in_pipe.send((0, color_asinteger,flanco))
# sleep(0.3)
| AlvaroRQ/prototipo | ownLibraries/shutterFiles/capturador.py | Python | gpl-3.0 | 5,811 |
# coding=utf-8
# Based on the SQuAD evaluation script from:
# https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
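# Illustrative sketch (added for clarity, strings are hypothetical):
#   f1_score("the cat sat", "a cat sat down")
# normalizes both answers (lower-case, articles and punctuation removed) and compares
# token bags: 2 shared tokens give precision 1.0, recall 2/3 and F1 = 0.8.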
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| google-research/xtreme | third_party/evaluate_squad.py | Python | apache-2.0 | 4,175 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from ggrc.models.mixins import (
Base, Titled, Described, Timeboxed, Stateful, WithContact
)
class CycleTaskGroup(
WithContact, Stateful, Timeboxed, Described, Titled, Base, db.Model):
__tablename__ = 'cycle_task_groups'
_title_uniqueness = False
VALID_STATES = (u'Assigned', u'InProgress', u'Finished', u'Verified', u'Declined')
cycle_id = db.Column(
db.Integer, db.ForeignKey('cycles.id'), nullable=False)
task_group_id = db.Column(
db.Integer, db.ForeignKey('task_groups.id'), nullable=True)
cycle_task_group_objects = db.relationship(
'CycleTaskGroupObject',
backref='cycle_task_group',
cascade='all, delete-orphan'
)
cycle_task_group_tasks = db.relationship(
'CycleTaskGroupObjectTask',
backref='cycle_task_group',
cascade='all, delete-orphan'
)
sort_index = db.Column(
db.String(length=250), default="", nullable=False)
next_due_date = db.Column(db.Date)
_publish_attrs = [
'cycle',
'task_group',
'cycle_task_group_objects',
'cycle_task_group_tasks',
'sort_index',
'next_due_date',
]
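# Minimal usage sketch (not part of the original module; it assumes an existing
# Cycle row bound to `cycle` and the usual ggrc SQLAlchemy session, and the
# field names follow the mixins listed above):
#
#     group = CycleTaskGroup(
#         title="Quarterly review tasks",
#         cycle_id=cycle.id,
#         status=u'Assigned',
#         sort_index="0",
#     )
#     db.session.add(group)
#     db.session.commit()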
| uskudnik/ggrc-core | src/ggrc_workflows/models/cycle_task_group.py | Python | apache-2.0 | 1,394 |
def Setup(Settings,DefaultModel):
# finetune_tests_COOK.py
Settings["experiment_name"] = "Finetuning-tests_big_cca_10hrs_experiment_COOK"
Settings["graph_histories"] = ['together'] #['all','together',[],[1,0],[0,0,0],[]]
n = 0
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'img_only'
Settings["models"][n]["epochs"] = 2
Settings["models"][n]["number_of_images"] = None
Settings["models"][n]["cooking_method"] = 'generators'
# COOK ALL THE FINETUNING FEATURES
Settings["models"][n]["finetune"] = True
Settings["models"][n]["finetune_num_of_cnn_layers"] = 162
Settings["models"][n]["finetune_epochs"] = 0
Settings["models"][n]["number_of_images"] = None
Settings["models"][n]["cooking_method"] = 'generators'
Settings["models"].append(DefaultModel.copy())
n+=1
Settings["models"][n]["dataset_name"] = "1200x_markable_299x299"
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'b'
Settings["models"][n]["epochs"] = 2
Settings["models"][n]["finetune"] = True
Settings["models"][n]["finetune_num_of_cnn_layers"] = 152
Settings["models"][n]["finetune_epochs"] = 0
Settings["models"][n]["number_of_images"] = None
Settings["models"][n]["cooking_method"] = 'generators'
return Settings
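# Usage sketch (an assumption about the experiment runner, which is not shown
# here): Setup() receives a Settings dict that already holds one model entry and
# a template model dict, and it returns the mutated Settings.
#
#     DefaultModel = {"model_type": "", "unique_id": "", "epochs": 1}
#     Settings = {"models": [DefaultModel.copy()]}
#     Settings = Setup(Settings, DefaultModel)
#     # Settings["models"] now describes two finetuning runs.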
| previtus/MGR-Project-Code | Settings/independent_experiments/finetunning_tests/finetune_tests_COOK.py | Python | mit | 1,395 |
import logging
from github3 import GitHub
import config
from plugins.repository.helper import *
class BitBucketHelper(RepositoryHelper):
def __init__(self, repo_url=None):
self.repo_url = repo_url
    def can_process(self, url):
        # Bitbucket repositories live at bitbucket.org; accept the older
        # bitbucket.com spelling as well.
        if "bitbucket.org" in url or "bitbucket.com" in url:
self.repo_url = url
return True
else:
return False
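    # Dispatch sketch (an assumption about how RepositoryHelper subclasses are
    # selected elsewhere in the framework; `helpers` would be a list of helper
    # instances):
    #
    #     for helper in helpers:
    #         if helper.can_process(url):
    #             files = helper.get_files_from_root(['LICENSE', 'COPYING'])
    #             break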
def login(self):
"""
Login using the appropriate credentials
:return:
"""
raise NotImplementedError("This method must be overridden")
def get_files_from_root(self, candidate_filenames):
"""
Given a list of candidate file names, examine the repository root, returning the file names and contents
:param candidate_filenames: A list of the files of interest e.g. ['COPYING','LICENSE']
:return: A Dictionary of the form {'filename':file_contents,...}
"""
        raise NotImplementedError("This method must be overridden")
 | softwaresaved/software-assessment-framework | plugins/repository/bitbucket.py | Python | bsd-3-clause | 985
from itertools import product
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import same_color
import pytest
from numpy.testing import assert_array_equal
from ..palettes import color_palette
from ..relational import (
_RelationalPlotter,
_LinePlotter,
_ScatterPlotter,
relplot,
lineplot,
scatterplot
)
@pytest.fixture(params=[
dict(x="x", y="y"),
dict(x="t", y="y"),
dict(x="a", y="y"),
dict(x="x", y="y", hue="y"),
dict(x="x", y="y", hue="a"),
dict(x="x", y="y", size="a"),
dict(x="x", y="y", style="a"),
dict(x="x", y="y", hue="s"),
dict(x="x", y="y", size="s"),
dict(x="x", y="y", style="s"),
dict(x="x", y="y", hue="a", style="a"),
dict(x="x", y="y", hue="a", size="b", style="b"),
])
def long_semantics(request):
return request.param
class Helpers:
# TODO Better place for these?
def scatter_rgbs(self, collections):
rgbs = []
for col in collections:
rgb = tuple(col.get_facecolor().squeeze()[:3])
rgbs.append(rgb)
return rgbs
def paths_equal(self, *args):
equal = all([len(a) == len(args[0]) for a in args])
for p1, p2 in zip(*args):
equal &= np.array_equal(p1.vertices, p2.vertices)
equal &= np.array_equal(p1.codes, p2.codes)
return equal
class TestRelationalPlotter(Helpers):
def test_wide_df_variables(self, wide_df):
p = _RelationalPlotter()
p.assign_variables(data=wide_df)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
assert len(p.plot_data) == np.product(wide_df.shape)
x = p.plot_data["x"]
expected_x = np.tile(wide_df.index, wide_df.shape[1])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = wide_df.values.ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(wide_df.columns.values, wide_df.shape[0])
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] == wide_df.index.name
assert p.variables["y"] is None
assert p.variables["hue"] == wide_df.columns.name
assert p.variables["style"] == wide_df.columns.name
def test_wide_df_with_nonnumeric_variables(self, long_df):
p = _RelationalPlotter()
p.assign_variables(data=long_df)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
numeric_df = long_df.select_dtypes("number")
assert len(p.plot_data) == np.product(numeric_df.shape)
x = p.plot_data["x"]
expected_x = np.tile(numeric_df.index, numeric_df.shape[1])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = numeric_df.values.ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(
numeric_df.columns.values, numeric_df.shape[0]
)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] == numeric_df.index.name
assert p.variables["y"] is None
assert p.variables["hue"] == numeric_df.columns.name
assert p.variables["style"] == numeric_df.columns.name
def test_wide_array_variables(self, wide_array):
p = _RelationalPlotter()
p.assign_variables(data=wide_array)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
assert len(p.plot_data) == np.product(wide_array.shape)
nrow, ncol = wide_array.shape
x = p.plot_data["x"]
expected_x = np.tile(np.arange(nrow), ncol)
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = wide_array.ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(ncol), nrow)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_flat_array_variables(self, flat_array):
p = _RelationalPlotter()
p.assign_variables(data=flat_array)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == np.product(flat_array.shape)
x = p.plot_data["x"]
expected_x = np.arange(flat_array.shape[0])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_array
assert_array_equal(y, expected_y)
assert p.variables["x"] is None
assert p.variables["y"] is None
def test_flat_list_variables(self, flat_list):
p = _RelationalPlotter()
p.assign_variables(data=flat_list)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_list)
x = p.plot_data["x"]
expected_x = np.arange(len(flat_list))
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_list
assert_array_equal(y, expected_y)
assert p.variables["x"] is None
assert p.variables["y"] is None
def test_flat_series_variables(self, flat_series):
p = _RelationalPlotter()
p.assign_variables(data=flat_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_series)
x = p.plot_data["x"]
expected_x = flat_series.index
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_series
assert_array_equal(y, expected_y)
assert p.variables["x"] is flat_series.index.name
assert p.variables["y"] is flat_series.name
def test_wide_list_of_series_variables(self, wide_list_of_series):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_series)
chunk_size = max(len(l) for l in wide_list_of_series)
assert len(p.plot_data) == chunks * chunk_size
index_union = np.unique(
np.concatenate([s.index for s in wide_list_of_series])
)
x = p.plot_data["x"]
expected_x = np.tile(index_union, chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = np.concatenate([
s.reindex(index_union) for s in wide_list_of_series
])
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
series_names = [s.name for s in wide_list_of_series]
expected_hue = np.repeat(series_names, chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_list_of_arrays_variables(self, wide_list_of_arrays):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_arrays)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_arrays)
chunk_size = max(len(l) for l in wide_list_of_arrays)
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(wide_list_of_arrays)
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(chunks), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_list_of_list_variables(self, wide_list_of_lists):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_lists)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_lists)
chunk_size = max(len(l) for l in wide_list_of_lists)
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(wide_list_of_lists)
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(chunks), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_series_variables(self, wide_dict_of_series):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_series)
chunk_size = max(len(l) for l in wide_dict_of_series.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_series.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_series), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_arrays_variables(self, wide_dict_of_arrays):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_arrays)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_arrays)
chunk_size = max(len(l) for l in wide_dict_of_arrays.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_arrays.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_arrays), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_lists_variables(self, wide_dict_of_lists):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_lists)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_lists)
chunk_size = max(len(l) for l in wide_dict_of_lists.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_lists.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_lists), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_long_df(self, long_df, long_semantics):
p = _RelationalPlotter(data=long_df, variables=long_semantics)
assert p.input_format == "long"
assert p.variables == long_semantics
for key, val in long_semantics.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_df_with_index(self, long_df, long_semantics):
p = _RelationalPlotter(
data=long_df.set_index("a"),
variables=long_semantics,
)
assert p.input_format == "long"
assert p.variables == long_semantics
for key, val in long_semantics.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_df_with_multiindex(self, long_df, long_semantics):
p = _RelationalPlotter(
data=long_df.set_index(["a", "x"]),
variables=long_semantics,
)
assert p.input_format == "long"
assert p.variables == long_semantics
for key, val in long_semantics.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_dict(self, long_dict, long_semantics):
p = _RelationalPlotter(
data=long_dict,
variables=long_semantics,
)
assert p.input_format == "long"
assert p.variables == long_semantics
for key, val in long_semantics.items():
assert_array_equal(p.plot_data[key], pd.Series(long_dict[val]))
@pytest.mark.parametrize(
"vector_type",
["series", "numpy", "list"],
)
def test_long_vectors(self, long_df, long_semantics, vector_type):
variables = {key: long_df[val] for key, val in long_semantics.items()}
if vector_type == "numpy":
# Requires pandas >= 0.24
# {key: val.to_numpy() for key, val in variables.items()}
variables = {
key: np.asarray(val) for key, val in variables.items()
}
elif vector_type == "list":
# Requires pandas >= 0.24
# {key: val.to_list() for key, val in variables.items()}
variables = {
key: val.tolist() for key, val in variables.items()
}
p = _RelationalPlotter(variables=variables)
assert p.input_format == "long"
assert list(p.variables) == list(long_semantics)
if vector_type == "series":
assert p.variables == long_semantics
for key, val in long_semantics.items():
assert_array_equal(p.plot_data[key], long_df[val])
def test_long_undefined_variables(self, long_df):
p = _RelationalPlotter()
with pytest.raises(ValueError):
p.assign_variables(
data=long_df, variables=dict(x="not_in_df"),
)
with pytest.raises(ValueError):
p.assign_variables(
data=long_df, variables=dict(x="x", y="not_in_df"),
)
with pytest.raises(ValueError):
p.assign_variables(
data=long_df, variables=dict(x="x", y="y", hue="not_in_df"),
)
@pytest.mark.parametrize(
"arg", [[], np.array([]), pd.DataFrame()],
)
def test_empty_data_input(self, arg):
p = _RelationalPlotter(data=arg)
assert not p.variables
if not isinstance(arg, pd.DataFrame):
p = _RelationalPlotter(variables=dict(x=arg, y=arg))
assert not p.variables
def test_units(self, repeated_df):
p = _RelationalPlotter(
data=repeated_df,
variables=dict(x="x", y="y", units="u"),
)
assert_array_equal(p.plot_data["units"], repeated_df["u"])
def test_relplot_simple(self, long_df):
g = relplot(data=long_df, x="x", y="y", kind="scatter")
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(x, long_df["x"])
assert_array_equal(y, long_df["y"])
g = relplot(data=long_df, x="x", y="y", kind="line")
x, y = g.ax.lines[0].get_xydata().T
expected = long_df.groupby("x").y.mean()
assert_array_equal(x, expected.index)
assert y == pytest.approx(expected.values)
with pytest.raises(ValueError):
g = relplot(data=long_df, x="x", y="y", kind="not_a_kind")
def test_relplot_complex(self, long_df):
for sem in ["hue", "size", "style"]:
g = relplot(data=long_df, x="x", y="y", **{sem: "a"})
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(x, long_df["x"])
assert_array_equal(y, long_df["y"])
for sem in ["hue", "size", "style"]:
g = relplot(
data=long_df, x="x", y="y", col="c", **{sem: "a"}
)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
for sem in ["size", "style"]:
g = relplot(
data=long_df, x="x", y="y", hue="b", col="c", **{sem: "a"}
)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
for sem in ["hue", "size", "style"]:
g = relplot(
data=long_df.sort_values(["c", "b"]),
x="x", y="y", col="b", row="c", **{sem: "a"}
)
grouped = long_df.groupby(["c", "b"])
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
@pytest.mark.parametrize(
"vector_type",
["series", "numpy", "list"],
)
def test_relplot_vectors(self, long_df, vector_type):
semantics = dict(x="x", y="y", hue="f", col="c")
kws = {key: long_df[val] for key, val in semantics.items()}
g = relplot(data=long_df, **kws)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
def test_relplot_wide(self, wide_df):
g = relplot(data=wide_df)
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(y, wide_df.values.T.ravel())
def test_relplot_hues(self, long_df):
palette = ["r", "b", "g"]
g = relplot(
x="x", y="y", hue="a", style="b", col="c",
palette=palette, data=long_df
)
palette = dict(zip(long_df["a"].unique(), palette))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_hues = [palette[val] for val in grp_df["a"]]
assert same_color(points.get_facecolors(), expected_hues)
def test_relplot_sizes(self, long_df):
sizes = [5, 12, 7]
g = relplot(
data=long_df,
x="x", y="y", size="a", hue="b", col="c",
sizes=sizes,
)
sizes = dict(zip(long_df["a"].unique(), sizes))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_sizes = [sizes[val] for val in grp_df["a"]]
assert_array_equal(points.get_sizes(), expected_sizes)
def test_relplot_styles(self, long_df):
markers = ["o", "d", "s"]
g = relplot(
data=long_df,
x="x", y="y", style="a", hue="b", col="c",
markers=markers,
)
paths = []
for m in markers:
m = mpl.markers.MarkerStyle(m)
paths.append(m.get_path().transformed(m.get_transform()))
paths = dict(zip(long_df["a"].unique(), paths))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_paths = [paths[val] for val in grp_df["a"]]
assert self.paths_equal(points.get_paths(), expected_paths)
def test_relplot_stringy_numerics(self, long_df):
long_df["x_str"] = long_df["x"].astype(str)
g = relplot(data=long_df, x="x", y="y", hue="x_str")
points = g.ax.collections[0]
xys = points.get_offsets()
mask = np.ma.getmask(xys)
assert not mask.any()
assert_array_equal(xys, long_df[["x", "y"]])
g = relplot(data=long_df, x="x", y="y", size="x_str")
points = g.ax.collections[0]
xys = points.get_offsets()
mask = np.ma.getmask(xys)
assert not mask.any()
assert_array_equal(xys, long_df[["x", "y"]])
def test_relplot_legend(self, long_df):
g = relplot(data=long_df, x="x", y="y")
assert g._legend is None
g = relplot(data=long_df, x="x", y="y", hue="a")
texts = [t.get_text() for t in g._legend.texts]
expected_texts = long_df["a"].unique()
assert_array_equal(texts, expected_texts)
g = relplot(data=long_df, x="x", y="y", hue="s", size="s")
texts = [t.get_text() for t in g._legend.texts]
assert_array_equal(texts, np.sort(texts))
g = relplot(data=long_df, x="x", y="y", hue="a", legend=False)
assert g._legend is None
palette = color_palette("deep", len(long_df["b"].unique()))
a_like_b = dict(zip(long_df["a"].unique(), long_df["b"].unique()))
long_df["a_like_b"] = long_df["a"].map(a_like_b)
g = relplot(
data=long_df,
x="x", y="y", hue="b", style="a_like_b",
palette=palette, kind="line", estimator=None,
)
lines = g._legend.get_lines()[1:] # Chop off title dummy
for line, color in zip(lines, palette):
assert line.get_color() == color
def test_ax_kwarg_removal(self, long_df):
f, ax = plt.subplots()
with pytest.warns(UserWarning):
g = relplot(data=long_df, x="x", y="y", ax=ax)
assert len(ax.collections) == 0
assert len(g.ax.collections) > 0
class TestLinePlotter(Helpers):
def test_aggregate(self, long_df):
p = _LinePlotter(data=long_df, variables=dict(x="x", y="y"))
p.n_boot = 10000
p.sort = False
x = pd.Series(np.tile([1, 2], 100))
y = pd.Series(np.random.randn(200))
y_mean = y.groupby(x).mean()
def sem(x):
return np.std(x) / np.sqrt(len(x))
y_sem = y.groupby(x).apply(sem)
y_cis = pd.DataFrame(dict(low=y_mean - y_sem,
high=y_mean + y_sem),
columns=["low", "high"])
p.ci = 68
p.estimator = "mean"
index, est, cis = p.aggregate(y, x)
assert_array_equal(index.values, x.unique())
assert est.index.equals(index)
assert est.values == pytest.approx(y_mean.values)
assert cis.values == pytest.approx(y_cis.values, 4)
assert list(cis.columns) == ["low", "high"]
p.estimator = np.mean
index, est, cis = p.aggregate(y, x)
assert_array_equal(index.values, x.unique())
assert est.index.equals(index)
assert est.values == pytest.approx(y_mean.values)
assert cis.values == pytest.approx(y_cis.values, 4)
assert list(cis.columns) == ["low", "high"]
p.seed = 0
_, _, ci1 = p.aggregate(y, x)
_, _, ci2 = p.aggregate(y, x)
assert_array_equal(ci1, ci2)
y_std = y.groupby(x).std()
y_cis = pd.DataFrame(dict(low=y_mean - y_std,
high=y_mean + y_std),
columns=["low", "high"])
p.ci = "sd"
index, est, cis = p.aggregate(y, x)
assert_array_equal(index.values, x.unique())
assert est.index.equals(index)
assert est.values == pytest.approx(y_mean.values)
assert cis.values == pytest.approx(y_cis.values)
assert list(cis.columns) == ["low", "high"]
p.ci = None
index, est, cis = p.aggregate(y, x)
assert cis is None
p.ci = 68
x, y = pd.Series([1, 2, 3]), pd.Series([4, 3, 2])
index, est, cis = p.aggregate(y, x)
assert_array_equal(index.values, x)
assert_array_equal(est.values, y)
assert cis is None
x, y = pd.Series([1, 1, 2]), pd.Series([2, 3, 4])
index, est, cis = p.aggregate(y, x)
assert cis.loc[2].isnull().all()
p = _LinePlotter(data=long_df, variables=dict(x="x", y="y"))
p.estimator = "mean"
p.n_boot = 100
p.ci = 95
x = pd.Categorical(["a", "b", "a", "b"], ["a", "b", "c"])
y = pd.Series([1, 1, 2, 2])
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
index, est, cis = p.aggregate(y, x)
assert cis.loc[["c"]].isnull().all().all()
def test_legend_data(self, long_df):
f, ax = plt.subplots()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
legend="full"
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert handles == []
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
legend="full",
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
assert labels == p._hue_map.levels
assert colors == p._hue_map(p._hue_map.levels)
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
markers = [h.get_marker() for h in handles]
assert labels == p._hue_map.levels
assert labels == p._style_map.levels
assert colors == p._hue_map(p._hue_map.levels)
assert markers == p._style_map(p._style_map.levels, "marker")
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
markers = [h.get_marker() for h in handles]
expected_labels = (
["a"]
+ p._hue_map.levels
+ ["b"] + p._style_map.levels
)
expected_colors = (
["w"] + p._hue_map(p._hue_map.levels)
+ ["w"] + [".2" for _ in p._style_map.levels]
)
expected_markers = (
[""] + ["None" for _ in p._hue_map.levels]
+ [""] + p._style_map(p._style_map.levels, "marker")
)
assert labels == expected_labels
assert colors == expected_colors
assert markers == expected_markers
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", size="a"),
legend="full"
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
widths = [h.get_linewidth() for h in handles]
assert labels == p._hue_map.levels
assert labels == p._size_map.levels
assert colors == p._hue_map(p._hue_map.levels)
assert widths == p._size_map(p._size_map.levels)
# --
x, y = np.random.randn(2, 40)
z = np.tile(np.arange(20), 2)
p = _LinePlotter(variables=dict(x=x, y=y, hue=z))
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._hue_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._hue_map.levels)
p = _LinePlotter(variables=dict(x=x, y=y, size=z))
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._size_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = "auto"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = True
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = "bad_value"
with pytest.raises(ValueError):
p.add_legend_data(ax)
ax.clear()
p = _LinePlotter(
variables=dict(x=x, y=y, hue=z + 1),
legend="brief"
)
        p.map_hue(norm=mpl.colors.LogNorm())
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert float(labels[1]) / float(labels[0]) == 10
ax.clear()
p = _LinePlotter(
variables=dict(x=x, y=y, hue=z % 2),
legend="auto"
)
        p.map_hue(norm=mpl.colors.LogNorm())
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == ["0", "1"]
ax.clear()
p = _LinePlotter(
variables=dict(x=x, y=y, size=z + 1),
legend="brief"
)
p.map_size(norm=mpl.colors.LogNorm())
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert float(labels[1]) / float(labels[0]) == 10
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="f"),
legend="brief",
)
p.add_legend_data(ax)
expected_labels = ['0.20', '0.22', '0.24', '0.26', '0.28']
handles, labels = ax.get_legend_handles_labels()
assert labels == expected_labels
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", size="f"),
legend="brief",
)
p.add_legend_data(ax)
expected_levels = ['0.20', '0.22', '0.24', '0.26', '0.28']
handles, labels = ax.get_legend_handles_labels()
assert labels == expected_levels
def test_plot(self, long_df, repeated_df):
f, ax = plt.subplots()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
sort=False,
estimator=None
)
p.plot(ax, {})
line, = ax.lines
assert_array_equal(line.get_xdata(), long_df.x.values)
assert_array_equal(line.get_ydata(), long_df.y.values)
ax.clear()
p.plot(ax, {"color": "k", "label": "test"})
line, = ax.lines
assert line.get_color() == "k"
assert line.get_label() == "test"
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
sort=True, estimator=None
)
ax.clear()
p.plot(ax, {})
line, = ax.lines
sorted_data = long_df.sort_values(["x", "y"])
assert_array_equal(line.get_xdata(), sorted_data.x.values)
assert_array_equal(line.get_ydata(), sorted_data.y.values)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(p._hue_map.levels)
for line, level in zip(ax.lines, p._hue_map.levels):
assert line.get_color() == p._hue_map(level)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", size="a"),
)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(p._size_map.levels)
for line, level in zip(ax.lines, p._size_map.levels):
assert line.get_linewidth() == p._size_map(level)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(p._hue_map.levels)
assert len(ax.lines) == len(p._style_map.levels)
for line, level in zip(ax.lines, p._hue_map.levels):
assert line.get_color() == p._hue_map(level)
assert line.get_marker() == p._style_map(level, "marker")
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
levels = product(p._hue_map.levels, p._style_map.levels)
expected_line_count = len(p._hue_map.levels) * len(p._style_map.levels)
assert len(ax.lines) == expected_line_count
for line, (hue, style) in zip(ax.lines, levels):
assert line.get_color() == p._hue_map(hue)
assert line.get_marker() == p._style_map(style, "marker")
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
estimator="mean", err_style="band", ci="sd", sort=True
)
ax.clear()
p.plot(ax, {})
line, = ax.lines
expected_data = long_df.groupby("x").y.mean()
assert_array_equal(line.get_xdata(), expected_data.index.values)
assert np.allclose(line.get_ydata(), expected_data.values)
assert len(ax.collections) == 1
# Test that nans do not propagate to means or CIs
p = _LinePlotter(
variables=dict(
x=[1, 1, 1, 2, 2, 2, 3, 3, 3],
y=[1, 2, 3, 3, np.nan, 5, 4, 5, 6],
),
estimator="mean", err_style="band", ci=95, n_boot=100, sort=True,
)
ax.clear()
p.plot(ax, {})
line, = ax.lines
assert line.get_xdata().tolist() == [1, 2, 3]
err_band = ax.collections[0].get_paths()
assert len(err_band) == 1
assert len(err_band[0].vertices) == 9
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
estimator="mean", err_style="band", ci="sd"
)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(ax.collections) == len(p._hue_map.levels)
for c in ax.collections:
assert isinstance(c, mpl.collections.PolyCollection)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
estimator="mean", err_style="bars", ci="sd"
)
ax.clear()
p.plot(ax, {})
n_lines = len(ax.lines)
assert n_lines / 2 == len(ax.collections) == len(p._hue_map.levels)
assert len(ax.collections) == len(p._hue_map.levels)
for c in ax.collections:
assert isinstance(c, mpl.collections.LineCollection)
p = _LinePlotter(
data=repeated_df,
variables=dict(x="x", y="y", units="u"),
estimator=None
)
ax.clear()
p.plot(ax, {})
n_units = len(repeated_df["u"].unique())
assert len(ax.lines) == n_units
p = _LinePlotter(
data=repeated_df,
variables=dict(x="x", y="y", hue="a", units="u"),
estimator=None
)
ax.clear()
p.plot(ax, {})
n_units *= len(repeated_df["a"].unique())
assert len(ax.lines) == n_units
p.estimator = "mean"
with pytest.raises(ValueError):
p.plot(ax, {})
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
err_style="band", err_kws={"alpha": .5},
)
ax.clear()
p.plot(ax, {})
for band in ax.collections:
assert band.get_alpha() == .5
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
err_style="bars", err_kws={"elinewidth": 2},
)
ax.clear()
p.plot(ax, {})
for lines in ax.collections:
            assert lines.get_linewidths() == 2
p.err_style = "invalid"
with pytest.raises(ValueError):
p.plot(ax, {})
x_str = long_df["x"].astype(str)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue=x_str),
)
ax.clear()
p.plot(ax, {})
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", size=x_str),
)
ax.clear()
p.plot(ax, {})
def test_axis_labels(self, long_df):
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
p.plot(ax1, {})
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "y"
p.plot(ax2, {})
assert ax2.get_xlabel() == "x"
assert ax2.get_ylabel() == "y"
assert not ax2.yaxis.label.get_visible()
def test_matplotlib_kwargs(self, long_df):
kws = {
"linestyle": "--",
"linewidth": 3,
"color": (1, .5, .2),
"markeredgecolor": (.2, .5, .2),
"markeredgewidth": 1,
}
ax = lineplot(data=long_df, x="x", y="y", **kws)
line, *_ = ax.lines
for key, val in kws.items():
plot_val = getattr(line, f"get_{key}")()
assert plot_val == val
def test_lineplot_axes(self, wide_df):
f1, ax1 = plt.subplots()
f2, ax2 = plt.subplots()
ax = lineplot(data=wide_df)
assert ax is ax2
ax = lineplot(data=wide_df, ax=ax1)
assert ax is ax1
def test_lineplot_vs_relplot(self, long_df, long_semantics):
ax = lineplot(data=long_df, **long_semantics)
g = relplot(data=long_df, kind="line", **long_semantics)
lin_lines = ax.lines
rel_lines = g.ax.lines
for l1, l2 in zip(lin_lines, rel_lines):
assert_array_equal(l1.get_xydata(), l2.get_xydata())
assert same_color(l1.get_color(), l2.get_color())
assert l1.get_linewidth() == l2.get_linewidth()
assert l1.get_linestyle() == l2.get_linestyle()
def test_lineplot_smoke(
self,
wide_df, wide_array,
wide_list_of_series, wide_list_of_arrays, wide_list_of_lists,
flat_array, flat_series, flat_list,
long_df, missing_df, object_df
):
f, ax = plt.subplots()
lineplot(x=[], y=[])
ax.clear()
lineplot(data=wide_df)
ax.clear()
lineplot(data=wide_array)
ax.clear()
lineplot(data=wide_list_of_series)
ax.clear()
lineplot(data=wide_list_of_arrays)
ax.clear()
lineplot(data=wide_list_of_lists)
ax.clear()
lineplot(data=flat_series)
ax.clear()
lineplot(data=flat_array)
ax.clear()
lineplot(data=flat_list)
ax.clear()
lineplot(x="x", y="y", data=long_df)
ax.clear()
lineplot(x=long_df.x, y=long_df.y)
ax.clear()
lineplot(x=long_df.x, y="y", data=long_df)
ax.clear()
lineplot(x="x", y=long_df.y.values, data=long_df)
ax.clear()
lineplot(x="x", y="t", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="a", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="b", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="a", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="b", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="a", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="s", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="a", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="s", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="f", data=object_df)
ax.clear()
lineplot(x="x", y="y", hue="c", size="f", data=object_df)
ax.clear()
lineplot(x="x", y="y", hue="f", size="s", data=object_df)
ax.clear()
class TestScatterPlotter(Helpers):
def test_legend_data(self, long_df):
m = mpl.markers.MarkerStyle("o")
default_mark = m.get_path().transformed(m.get_transform())
m = mpl.markers.MarkerStyle("")
null = m.get_path().transformed(m.get_transform())
f, ax = plt.subplots()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y"),
legend="full",
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert handles == []
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
legend="full",
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
expected_colors = p._hue_map(p._hue_map.levels)
assert labels == p._hue_map.levels
assert same_color(colors, expected_colors)
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
expected_colors = p._hue_map(p._hue_map.levels)
paths = [h.get_paths()[0] for h in handles]
expected_paths = p._style_map(p._style_map.levels, "path")
assert labels == p._hue_map.levels
assert labels == p._style_map.levels
assert same_color(colors, expected_colors)
assert self.paths_equal(paths, expected_paths)
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
paths = [h.get_paths()[0] for h in handles]
expected_colors = (
["w"] + p._hue_map(p._hue_map.levels)
+ ["w"] + [".2" for _ in p._style_map.levels]
)
expected_paths = (
[null] + [default_mark for _ in p._hue_map.levels]
+ [null] + p._style_map(p._style_map.levels, "path")
)
assert labels == (
["a"] + p._hue_map.levels + ["b"] + p._style_map.levels
)
assert same_color(colors, expected_colors)
assert self.paths_equal(paths, expected_paths)
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", size="a"),
legend="full"
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
expected_colors = p._hue_map(p._hue_map.levels)
sizes = [h.get_sizes()[0] for h in handles]
expected_sizes = p._size_map(p._size_map.levels)
assert labels == p._hue_map.levels
assert labels == p._size_map.levels
assert same_color(colors, expected_colors)
assert sizes == expected_sizes
# --
ax.clear()
sizes_list = [10, 100, 200]
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", size="s"),
legend="full",
)
p.map_size(sizes=sizes_list)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
sizes = [h.get_sizes()[0] for h in handles]
expected_sizes = p._size_map(p._size_map.levels)
assert labels == [str(l) for l in p._size_map.levels]
assert sizes == expected_sizes
# --
ax.clear()
sizes_dict = {2: 10, 4: 100, 8: 200}
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", size="s"),
legend="full"
)
p.map_size(sizes=sizes_dict)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
sizes = [h.get_sizes()[0] for h in handles]
expected_sizes = p._size_map(p._size_map.levels)
assert labels == [str(l) for l in p._size_map.levels]
assert sizes == expected_sizes
# --
x, y = np.random.randn(2, 40)
z = np.tile(np.arange(20), 2)
p = _ScatterPlotter(
variables=dict(x=x, y=y, hue=z),
)
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._hue_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._hue_map.levels)
p = _ScatterPlotter(
variables=dict(x=x, y=y, size=z),
)
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._size_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = "bad_value"
with pytest.raises(ValueError):
p.add_legend_data(ax)
def test_plot(self, long_df, repeated_df):
f, ax = plt.subplots()
p = _ScatterPlotter(data=long_df, variables=dict(x="x", y="y"))
p.plot(ax, {})
points = ax.collections[0]
assert_array_equal(points.get_offsets(), long_df[["x", "y"]].values)
ax.clear()
p.plot(ax, {"color": "k", "label": "test"})
points = ax.collections[0]
assert same_color(points.get_facecolor(), "k")
assert points.get_label() == "test"
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", hue="a")
)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_colors = p._hue_map(p.plot_data["hue"])
assert same_color(points.get_facecolors(), expected_colors)
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", style="c"),
)
p.map_style(markers=["+", "x"])
ax.clear()
color = (1, .3, .8)
p.plot(ax, {"color": color})
points = ax.collections[0]
assert same_color(points.get_edgecolors(), [color])
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", size="a"),
)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_sizes = p._size_map(p.plot_data["size"])
assert_array_equal(points.get_sizes(), expected_sizes)
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_colors = p._hue_map(p.plot_data["hue"])
expected_paths = p._style_map(p.plot_data["style"], "path")
assert same_color(points.get_facecolors(), expected_colors)
assert self.paths_equal(points.get_paths(), expected_paths)
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_colors = p._hue_map(p.plot_data["hue"])
expected_paths = p._style_map(p.plot_data["style"], "path")
assert same_color(points.get_facecolors(), expected_colors)
assert self.paths_equal(points.get_paths(), expected_paths)
x_str = long_df["x"].astype(str)
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", hue=x_str),
)
ax.clear()
p.plot(ax, {})
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", size=x_str),
)
ax.clear()
p.plot(ax, {})
def test_axis_labels(self, long_df):
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
p = _ScatterPlotter(data=long_df, variables=dict(x="x", y="y"))
p.plot(ax1, {})
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "y"
p.plot(ax2, {})
assert ax2.get_xlabel() == "x"
assert ax2.get_ylabel() == "y"
assert not ax2.yaxis.label.get_visible()
def test_scatterplot_axes(self, wide_df):
f1, ax1 = plt.subplots()
f2, ax2 = plt.subplots()
ax = scatterplot(data=wide_df)
assert ax is ax2
ax = scatterplot(data=wide_df, ax=ax1)
assert ax is ax1
def test_literal_attribute_vectors(self):
f, ax = plt.subplots()
x = y = [1, 2, 3]
s = [5, 10, 15]
c = [(1, 1, 0, 1), (1, 0, 1, .5), (.5, 1, 0, 1)]
scatterplot(x=x, y=y, c=c, s=s, ax=ax)
points, = ax.collections
assert_array_equal(points.get_sizes().squeeze(), s)
assert_array_equal(points.get_facecolors(), c)
def test_linewidths(self, long_df):
f, ax = plt.subplots()
scatterplot(data=long_df, x="x", y="y", s=10)
scatterplot(data=long_df, x="x", y="y", s=20)
points1, points2 = ax.collections
assert (
points1.get_linewidths().item() < points2.get_linewidths().item()
)
ax.clear()
scatterplot(data=long_df, x="x", y="y", s=long_df["x"])
scatterplot(data=long_df, x="x", y="y", s=long_df["x"] * 2)
points1, points2 = ax.collections
assert (
points1.get_linewidths().item() < points2.get_linewidths().item()
)
ax.clear()
scatterplot(data=long_df, x="x", y="y", size=long_df["x"])
scatterplot(data=long_df, x="x", y="y", size=long_df["x"] * 2)
points1, points2, *_ = ax.collections
assert (
points1.get_linewidths().item() < points2.get_linewidths().item()
)
ax.clear()
lw = 2
scatterplot(data=long_df, x="x", y="y", linewidth=lw)
assert ax.collections[0].get_linewidths().item() == lw
def test_datetime_scale(self, long_df):
ax = scatterplot(data=long_df, x="t", y="y")
# Check that we avoid weird matplotlib default auto scaling
# https://github.com/matplotlib/matplotlib/issues/17586
        assert ax.get_xlim()[0] > ax.xaxis.convert_units(np.datetime64("2002-01-01"))
def test_scatterplot_vs_relplot(self, long_df, long_semantics):
ax = scatterplot(data=long_df, **long_semantics)
g = relplot(data=long_df, kind="scatter", **long_semantics)
for s_pts, r_pts in zip(ax.collections, g.ax.collections):
assert_array_equal(s_pts.get_offsets(), r_pts.get_offsets())
assert_array_equal(s_pts.get_sizes(), r_pts.get_sizes())
assert_array_equal(s_pts.get_facecolors(), r_pts.get_facecolors())
assert self.paths_equal(s_pts.get_paths(), r_pts.get_paths())
def test_scatterplot_smoke(
self,
wide_df, wide_array,
flat_series, flat_array, flat_list,
wide_list_of_series, wide_list_of_arrays, wide_list_of_lists,
long_df, missing_df, object_df
):
f, ax = plt.subplots()
scatterplot(x=[], y=[])
ax.clear()
scatterplot(data=wide_df)
ax.clear()
scatterplot(data=wide_array)
ax.clear()
scatterplot(data=wide_list_of_series)
ax.clear()
scatterplot(data=wide_list_of_arrays)
ax.clear()
scatterplot(data=wide_list_of_lists)
ax.clear()
scatterplot(data=flat_series)
ax.clear()
scatterplot(data=flat_array)
ax.clear()
scatterplot(data=flat_list)
ax.clear()
scatterplot(x="x", y="y", data=long_df)
ax.clear()
scatterplot(x=long_df.x, y=long_df.y)
ax.clear()
scatterplot(x=long_df.x, y="y", data=long_df)
ax.clear()
scatterplot(x="x", y=long_df.y.values, data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="a", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="b", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="a", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="b", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="a", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="s", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="a", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="s", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="f", data=object_df)
ax.clear()
scatterplot(x="x", y="y", hue="c", size="f", data=object_df)
ax.clear()
scatterplot(x="x", y="y", hue="f", size="s", data=object_df)
ax.clear()
| iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/seaborn/tests/test_relational.py | Python | gpl-2.0 | 56,653 |
# encoding: utf-8
# module samba.dcerpc.lsa
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/lsa.so
# by generator 1.135
""" lsa DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class LUID(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
high = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
low = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/lsa/LUID.py | Python | gpl-2.0 | 734 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# DChars Copyright (C) 2012 Suizokukan
# Contact: suizokukan _A.T._ orange dot fr
#
# This file is part of DChars.
# DChars is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DChars is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DChars. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏DChars❏ : dchars/languages/grc/dstring.py
"""
# problem with Pylint :
# pylint: disable=E0611
# many errors like "No name 'extensions' in module 'dchars'"
import re
import unicodedata
from dchars.errors.errors import DCharsError
from dchars.utilities.lstringtools import isort_a_lstrings_bylen_nodup
from dchars.dstring import DStringMotherClass
from dchars.languages.grc.dcharacter import DCharacterGRC
from dchars.languages.grc.symbols import SYMB_PUNCTUATION, \
SYMB_UPPER_CASE, \
SYMB_LOWER_CASE, \
SYMB_OTHER_SYMBOLS, \
SYMB_DIACRITICS
from dchars.languages.grc.symbols import SYMB_DIACRITICS__TONOS, \
SYMB_DIACRITICS__MEKOS, \
SYMB_DIACRITICS__PNEUMA
from dchars.utilities.lstringtools import number_of_occurences
from dchars.utilities.sortingvalue import SortingValue
# known transliterations :
import dchars.languages.grc.transliterations.basic.basic as basictrans
import dchars.languages.grc.transliterations.basic.ucombinations as basictrans_ucombinations
import dchars.languages.grc.transliterations.betacode.betacode as betacodetrans
import dchars.languages.grc.transliterations.betacode.ucombinations as betacodetrans_ucombinations
import dchars.languages.grc.transliterations.perseus.perseus as perseustrans
import dchars.languages.grc.transliterations.perseus.ucombinations as perseustrans_ucombinations
import dchars.languages.grc.transliterations.gutenberg.gutenberg as gutenbergtrans
import dchars.languages.grc.transliterations.gutenberg.ucombinations as gutenbergtrans_ucombinations
################################################################################
class DStringGRC(DStringMotherClass):
"""
class DStringGRC
DO NOT CREATE A DStringGRC object directly but use instead the
dchars.py::new_dstring function.
"""
# regex pattern used to slice a source string :
#
# NB : we use the default_symbols__pattern() function, NOT the normal
# default_symbols() function since some characters have to be
# treated apart to work with a regex.
pattern_letters = "|".join( isort_a_lstrings_bylen_nodup(
SYMB_LOWER_CASE.default_symbols__pattern() + \
SYMB_UPPER_CASE.default_symbols__pattern() + \
SYMB_OTHER_SYMBOLS.default_symbols__pattern() + \
SYMB_PUNCTUATION.default_symbols__pattern() ))
pattern_diacritics = "|".join( isort_a_lstrings_bylen_nodup(
SYMB_DIACRITICS.default_symbols__pattern() ))
pattern = re.compile("((?P<letter>{0})(?P<diacritics>({1})+)?)".format( pattern_letters,
pattern_diacritics ))
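    # Illustrative note (an assumption about how the pattern above behaves): on an
    # NFD-normalized source string, each match pairs one base letter with the run
    # of combining diacritics that follows it; e.g. "ά" decomposes to "α" + U+0301,
    # so group('letter') would be "α" and group('diacritics') the combining accent.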
# transliterations' methods : available direction(s) :
trans__directions = {
"basic" : basictrans.AVAILABLE_DIRECTIONS,
"betacode" : betacodetrans.AVAILABLE_DIRECTIONS,
"gutenberg" : gutenbergtrans.AVAILABLE_DIRECTIONS,
"perseus" : perseustrans.AVAILABLE_DIRECTIONS,
}
# transliteration's functions :
trans__init_from_transliteration = {
"basic" : basictrans.dstring__init_from_translit_str,
"betacode": betacodetrans.dstring__init_from_translit_str,
"perseus" : perseustrans.dstring__init_from_translit_str,
"gutenberg" : None,
}
trans__get_transliteration = {
"basic" : basictrans.dstring__trans__get_trans,
"betacode" : betacodetrans.dstring__trans__get_trans,
"perseus" : perseustrans.dstring__trans__get_trans,
"gutenberg" : gutenbergtrans.dstring__trans__get_trans,
}
trans__get_transl_ucombinations = {
"basic" : basictrans_ucombinations.get_usefull_combinations,
"betacode" : betacodetrans_ucombinations.get_usefull_combinations,
"gutenberg" : gutenbergtrans_ucombinations.get_usefull_combinations,
"perseus" : perseustrans_ucombinations.get_usefull_combinations,
}
#///////////////////////////////////////////////////////////////////////////
def __init__(self, str_src = None):
"""
DStringGRC.__init__
the three following attributes have been created by the call to
dchars.py::new_dstring() :
self.iso639_3_name : (str)
self.transliteration_method : (str)
self.options : (dict)
"""
DStringMotherClass.__init__(self)
if str_src is not None:
self.init_from_str(str_src)
#///////////////////////////////////////////////////////////////////////////
def endsWithABareiaAccent(self):
"""
DStringGRC.endsWithABareiaAccent
Return True if the last vowel of the word is marked with
a βαρεῖα accent.
"καὶ" : True
"καί" : False
"""
res = False
for dchar in self[::-1]:
if dchar.base_char in ('α', 'ε', 'η', 'ι', 'ο', 'υ', 'ω'):
if dchar.tonos == 'βαρεῖα':
res = True
break
return res
#///////////////////////////////////////////////////////////////////////////
def get_sourcestr_representation(self, ignore_makron = False):
"""
DStringGRC.get_sourcestr_representation
PARAMETER :
o (bool) ignore_makron : if True, no makron will be added on the
characters
RETURN VALUE : a (str) string.
"""
res = []
for dchar in self:
res.append( dchar.get_sourcestr_representation(ignore_makron) )
return "".join(res)
#///////////////////////////////////////////////////////////////////////////
def get_usefull_combinations(self):
"""
DStringGRC.get_usefull_combinations
        Return a DString with all the useful combinations of characters,
        i.e. only the 'interesting' characters (not punctuation, for example,
        if it is too simple). The DChars stored in the dstring will be unique,
        i.e. two dchars will not have the same appearance (__str__()).
        NB : this function has nothing to do with linguistics or a strict
        approach of the language. It only returns the most common and/or
        useful characters of the writing system.
        NB : function required by the dchars-fe project.
"""
self.clear()
dchar = DCharacterGRC(self)
for dchar in dchar.get_usefull_combinations():
already_present = False
for dchar2 in self:
if str(dchar) == str(dchar2):
already_present = True
if not already_present:
self.append( dchar )
return self
#///////////////////////////////////////////////////////////////////////////
def get_usefull_transl_combinations(self):
"""
DStringGRC.get_usefull_transl_combinations
        Return a (str)string with all the useful combinations of TRANSLITERATED
        characters, i.e. only the 'interesting' characters (not punctuation,
        for example, if it is too simple).
        NB : this function has nothing to do with linguistics or a strict
        approach of the language. It only returns the most common and/or
        useful characters of the writing system.
        NB : function required by the dchars-fe project.
"""
# Pylint can't know that <self> has a 'transliteration_method' member
# created when <self> has been initialized by new_dstring() :
# pylint: disable=E1101
# -> "Instance of 'DStringGRC' has no 'transliteration_method' member"
res = DStringGRC.trans__get_transl_ucombinations[self.transliteration_method]()
return res
#///////////////////////////////////////////////////////////////////////////
def get_transliteration(self):
"""
DStringGRC.get_transliteration
"""
        # Pylint can't know that <self> has a 'transliteration_method' member
        # created when <self> has been initialized by new_dstring() :
        # pylint: disable=E1101
        # -> "Instance of 'DStringGRC' has no 'transliteration_method' member"
res = DStringGRC.trans__get_transliteration[self.transliteration_method](self)
return res
#///////////////////////////////////////////////////////////////////////////
def init_from_str(self, str_src):
"""
DStringGRC.init_from_str
        Function called by __init__(); initializes <self> from <str_src>.
str_src : str
HOW IT WORKS :
* (1) str_src -> (decomposition) unicodedata.normalize('NFD',) = normalized_src
* (2) = normalized_src -> (default symbols required) :
* replace_by_the_default_symbols() -> normalized_src
* (3) initialisation from the recognized characters.
* re.finditer(DStringGRC.pattern) give the symbols{letter+diacritics}
* (3.1) base_char
* (3.2) contextual_form
* (3.3) tonos (τόνος)
* (3.4) mekos (μῆκος)
* (3.5) pneuma (πνεῦμα)
* (3.6) hypogegrammene (ὑπογεγραμμένη)
* (3.7) dialutika (διαλυτικά)
* (3.8) we add the new character
"""
#.......................................................................
# (1) str_src -> (decomposition) unicodedata.normalize('NFD',) = normalized_src
#.......................................................................
normalized_src = unicodedata.normalize('NFD', str_src)
#.......................................................................
# (2) = normalized_src -> (default symbols required) :
# replace_by_the_default_symbols() -> normalized_src
#.......................................................................
normalized_src = SYMB_PUNCTUATION.replace_by_the_default_symbols(normalized_src)
normalized_src = SYMB_LOWER_CASE.replace_by_the_default_symbols(normalized_src)
normalized_src = SYMB_UPPER_CASE.replace_by_the_default_symbols(normalized_src)
normalized_src = SYMB_OTHER_SYMBOLS.replace_by_the_default_symbols(normalized_src)
normalized_src = SYMB_DIACRITICS.replace_by_the_default_symbols(normalized_src)
#.......................................................................
# (3) initialisation from the recognized characters.
# re.finditer(DStringGRC.pattern) give the symbols{letter+diacritics}
#.......................................................................
        indexes = [] # indexes of the well-analyzed substrings : ( start, end )
for element in re.finditer(DStringGRC.pattern,
normalized_src):
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# we add the unknown characters at the beginning and in the middle
# of the string (see at the end of this function)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
if indexes:
# <indexes> isn't empty :
# ... we add the unknown character(s) between the last character and
# the current one :
for index in range( max(indexes[-1])+1, element.start() ):
new_character = DCharacterGRC(dstring_object = self,
unknown_char = True,
base_char = normalized_src[index])
self.append( new_character )
else:
# <indexes> is empty :
# ... we add the unknown character(s) before the first index in <indexes> :
for index in range( 0, element.start() ):
new_character = DCharacterGRC(dstring_object = self,
unknown_char = True,
base_char = normalized_src[index])
self.append( new_character )
indexes.append( (element.start(), element.end()-1 ) )
data = element.groupdict()
letter = data['letter']
diacritics = data['diacritics']
punctuation = letter in SYMB_PUNCTUATION.symbol2name
capital_letter = letter in SYMB_UPPER_CASE.symbol2name
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.1) base_char
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
if punctuation:
# punctuation symbol :
base_char = SYMB_PUNCTUATION.get_the_name_for_this_symbol(letter)
elif letter in SYMB_LOWER_CASE.symbol2name:
# lower case :
base_char = SYMB_LOWER_CASE.get_the_name_for_this_symbol(letter)
elif letter in SYMB_UPPER_CASE.symbol2name:
# upper case :
base_char = SYMB_UPPER_CASE.get_the_name_for_this_symbol(letter)
else:
# other symbols :
base_char = SYMB_OTHER_SYMBOLS.get_the_name_for_this_symbol(letter)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.2) contextual_form
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
if base_char == 'β' and not capital_letter:
contextual_form = "initial"
elif base_char == 'ϐ' and not capital_letter:
base_char = 'β'
contextual_form = "medium+final"
elif base_char == 'σ' and not capital_letter:
contextual_form = "initial+medium"
elif base_char == 'ς' and not capital_letter:
base_char = 'σ'
contextual_form = "final"
else:
contextual_form = "initial+medium+final"
tonos = None
mekos = None
pneuma = None
hypogegrammene = False
dialutika = False
if diacritics is not None:
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.3) tonos (τόνος)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
tonos_nbr = number_of_occurences( source_string = diacritics,
symbols = SYMB_DIACRITICS__TONOS )
if tonos_nbr > 1:
err_msg = "In '{0}' (start={1}, end={2}), τόνος defined several times."
raise DCharsError( context = "DStringGRC.init_from_str",
message = err_msg.format(element.string,
element.start(),
element.end()))
if SYMB_DIACRITICS.are_these_symbols_in_a_string('τόνος.βαρεῖα', diacritics):
tonos = "βαρεῖα"
elif SYMB_DIACRITICS.are_these_symbols_in_a_string('τόνος.ὀξεῖα', diacritics):
tonos = "ὀξεῖα"
elif SYMB_DIACRITICS.are_these_symbols_in_a_string('τόνος.περισπωμένη', diacritics):
tonos = "περισπωμένη"
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.4) mekos (μῆκος)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
mekos_nbr = number_of_occurences( source_string = diacritics,
symbols = SYMB_DIACRITICS__MEKOS)
if mekos_nbr > 1:
err_msg = "In '{0}' (start={1}, end={2}), μῆκος defined several times."
raise DCharsError( context = "DStringGRC.init_from_str",
message = err_msg.format(element.string,
element.start(),
element.end()))
if SYMB_DIACRITICS.are_these_symbols_in_a_string('μῆκος.μακρόν', diacritics):
mekos = "μακρόν"
elif SYMB_DIACRITICS.are_these_symbols_in_a_string('μῆκος.βραχύ', diacritics):
mekos = "βραχύ"
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.5) pneuma (πνεῦμα)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
pneuma_nbr = number_of_occurences( source_string = diacritics,
symbols = SYMB_DIACRITICS__PNEUMA)
if pneuma_nbr > 1:
err_msg = "In '{0}' (start={1}, end={2}), πνεῦμα defined several times."
raise DCharsError( context = "DStringGRC.init_from_str",
message = err_msg.format(element.string,
element.start(),
element.end()))
if SYMB_DIACRITICS.are_these_symbols_in_a_string('πνεῦμα.ψιλὸν', diacritics):
pneuma = "ψιλὸν"
elif SYMB_DIACRITICS.are_these_symbols_in_a_string('πνεῦμα.δασὺ', diacritics):
pneuma = "δασὺ"
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.6) hypogegrammene (ὑπογεγραμμένη)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
hypogegrammene_nbr = number_of_occurences(
source_string = diacritics,
symbols = SYMB_DIACRITICS['ὑπογεγραμμένη'])
if hypogegrammene_nbr > 1:
err_msg = "In '{0}' (start={1}, end={2}), ὑπογεγραμμένη defined several times."
raise DCharsError( context = "DStringGRC.init_from_str",
message = err_msg.format(element.string,
element.start(),
element.end()))
hypogegrammene = SYMB_DIACRITICS.are_these_symbols_in_a_string('ὑπογεγραμμένη',
diacritics)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.7) dialutika (διαλυτικά)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
dialutika_nbr = number_of_occurences( source_string = diacritics,
symbols = SYMB_DIACRITICS['διαλυτικά'])
if dialutika_nbr > 1:
err_msg = "In '{0}' (start={1}, end={2}), διαλυτικά defined several times."
raise DCharsError( context = "DStringGRC.init_from_str",
message = err_msg.format(element.string,
element.start(),
element.end()))
dialutika = SYMB_DIACRITICS.are_these_symbols_in_a_string('διαλυτικά', diacritics)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# (3.8) we add the new character
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
new_character = DCharacterGRC(dstring_object = self,
unknown_char = False,
base_char = base_char,
contextual_form = contextual_form,
punctuation = punctuation,
capital_letter = capital_letter,
tonos = tonos,
pneuma = pneuma,
hypogegrammene = hypogegrammene,
dialutika = dialutika,
mekos=mekos)
self.append( new_character )
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# we add the final unknown characters (see at the beginning of this
# function)
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
if indexes:
# <element> is the last one and <indexes> isn't empty :
for index in range( max(indexes[-1])+1, len(normalized_src) ):
new_character = DCharacterGRC(dstring_object = self,
unknown_char = True,
base_char = normalized_src[index])
self.append( new_character )
else:
# <indexes> is empty :
for index in range( 0, len(normalized_src) ):
new_character = DCharacterGRC(dstring_object = self,
unknown_char = True,
base_char = normalized_src[index])
self.append( new_character )
#///////////////////////////////////////////////////////////////////////////
def init_from_transliteration(self, src):
"""
DStringGRC.init_from_transliteration
src : string
Return <self>
"""
# Pylint can't know that <self> has a 'transliteration_method' member
# created when <self> has been initialized by new_dstring() :
# pylint: disable=E1101
# -> "Instance of 'DStringGRC' has no 'transliteration_method' member"
DStringGRC.trans__init_from_transliteration[self.transliteration_method](
dstring = self,
dcharactertype = DCharacterGRC,
src = src)
return self
#///////////////////////////////////////////////////////////////////////////
def rm_soft_breathing_on_vowels(self):
"""
DStringGRC.rm_soft_breathing_on_vowels
Remove any soft breathing sign (ψιλὸν) on any vowel.
"""
for dchar in self:
if dchar.base_char in ('α', 'ε', 'η', 'ο', 'ω', 'ι', 'υ') and \
dchar.pneuma == 'ψιλὸν':
dchar.pneuma = None
#///////////////////////////////////////////////////////////////////////////
def sortingvalue(self):
"""
DStringGRC.sortingvalue
Return a SortingValue object
"""
res = SortingValue()
# Pylint can't know that <self> has an 'options' member
# created when <self> has been initialized by new_dstring() :
# pylint: disable=E1101
# -> "Instance of 'DStringGRC' has no 'options' member"
if self.options["sorting method"] == "default":
# base character :
data = []
for char in self:
data.append( ({False:0,
True:1}[char.unknown_char],
char.base_char ))
res.append(data)
# pneuma :
data = []
for char in self:
data.append( { None : 0,
"ψιλὸν" : 1,
"δασὺ" : 2, }[char.pneuma])
res.append(data)
# tonos :
data = []
for char in self:
data.append( { None : 0,
"ὀξεῖα" : 1,
"βαρεῖα" : 2,
"περισπωμένη" : 3, }[char.tonos])
res.append(data)
# hypogegrammene :
data = []
for char in self:
data.append( { False : 0,
True : 1, }[char.hypogegrammene])
res.append(data)
# mekos :
data = []
for char in self:
data.append( { None : 0,
"βραχύ" : 1,
"μακρόν" : 2, }[char.mekos])
res.append(data)
else:
# Pylint can't know that <self> has an 'options' member
# created when <self> has been initialized by new_dstring() :
# pylint: disable=E1101
# -> "Instance of 'DStringGRC' has no 'options' member"
err_msg = "unknown sorting method '{0}'."
raise DCharsError( context = "DStringGRC.sortingvalue",
message = err_msg.format(self.options["sorting method"]) )
return res
| suizokukan/anceps | dchars/languages/grc/dstring.py | Python | gpl-3.0 | 27,522 |
ranged_attacker = "ranged attacker"
melee_attacker = "melee attacker"
healer = 'healer'
dismantling_attacker = 'dismantler'
general_attacker = 'general attacker'
tough_attacker = 'tough guy'
work_and_carry_attacker = 'multi-purpose attacker'
civilian = 'civilian'
scout = 'scout'
| daboross/screeps-warreport | warreport/constants.py | Python | mit | 280 |
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SkipSummaryDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr20548 fails to build on lab.llvm.org buildbot")
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24462, Data formatters have problems on Windows")
def test_with_run_command(self):
"""Test data formatter commands."""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
def data_formatter_commands(self):
"""Test that that file and class static variables display correctly."""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
#import lldbsuite.test.lldbutil as lldbutil
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Setup the summaries for this scenario
#self.runCmd("type summary add --summary-string \"${var._M_dataplus._M_p}\" std::string")
self.runCmd(
"type summary add --summary-string \"Level 1\" \"DeepData_1\"")
self.runCmd(
"type summary add --summary-string \"Level 2\" \"DeepData_2\" -e")
self.runCmd(
"type summary add --summary-string \"Level 3\" \"DeepData_3\"")
self.runCmd(
"type summary add --summary-string \"Level 4\" \"DeepData_4\"")
self.runCmd(
"type summary add --summary-string \"Level 5\" \"DeepData_5\"")
# Default case, just print out summaries
self.expect('frame variable',
substrs=['(DeepData_1) data1 = Level 1',
'(DeepData_2) data2 = Level 2 {',
'm_child1 = Level 3',
'm_child2 = Level 3',
'm_child3 = Level 3',
'm_child4 = Level 3',
'}'])
# Skip the default (should be 1) levels of summaries
self.expect('frame variable --no-summary-depth',
substrs=['(DeepData_1) data1 = {',
'm_child1 = 0x',
'}',
'(DeepData_2) data2 = {',
'm_child1 = Level 3',
'm_child2 = Level 3',
'm_child3 = Level 3',
'm_child4 = Level 3',
'}'])
# Now skip 2 levels of summaries
self.expect('frame variable --no-summary-depth=2',
substrs=['(DeepData_1) data1 = {',
'm_child1 = 0x',
'}',
'(DeepData_2) data2 = {',
'm_child1 = {',
'm_child1 = 0x',
'Level 4',
'm_child2 = {',
'm_child3 = {',
'}'])
# Check that no "Level 3" comes out
self.expect(
'frame variable data1.m_child1 --no-summary-depth=2',
matching=False,
substrs=['Level 3'])
# Now expand a pointer with 2 level of skipped summaries
self.expect('frame variable data1.m_child1 --no-summary-depth=2',
substrs=['(DeepData_2 *) data1.m_child1 = 0x'])
# Deref and expand said pointer
self.expect('frame variable *data1.m_child1 --no-summary-depth=2',
substrs=['(DeepData_2) *data1.m_child1 = {',
'm_child2 = {',
'm_child1 = 0x',
'Level 4',
'}'])
# Expand an expression, skipping 2 layers of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --no-summary-depth=2',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_child2 = {',
'm_child1 = Level 5',
'm_child2 = Level 5',
'm_child3 = Level 5',
'}'])
# Expand same expression, skipping only 1 layer of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --no-summary-depth=1',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_child1 = 0x',
'Level 4',
'm_child2 = Level 4',
'}'])
# Bad debugging info on SnowLeopard gcc (Apple Inc. build 5666).
# Skip the following tests if the condition is met.
if self.getCompiler().endswith('gcc') and not self.getCompiler().endswith('llvm-gcc'):
import re
gcc_version_output = system(
[[lldbutil.which(self.getCompiler()), "-v"]])[1]
#print("my output:", gcc_version_output)
for line in gcc_version_output.split(os.linesep):
m = re.search('\(Apple Inc\. build ([0-9]+)\)', line)
#print("line:", line)
if m:
gcc_build = int(m.group(1))
#print("gcc build:", gcc_build)
if gcc_build >= 5666:
# rdar://problem/9804600"
self.skipTest(
"rdar://problem/9804600 wrong namespace for std::string in debug info")
# Expand same expression, skipping 3 layers of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --show-types --no-summary-depth=3',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_some_text = "Just a test"',
'm_child2 = {',
'm_some_text = "Just a test"'])
# Change summary and expand, first without --no-summary-depth then with
# --no-summary-depth
self.runCmd(
"type summary add --summary-string \"${var.m_some_text}\" DeepData_5")
self.expect('fr var data2.m_child4.m_child2.m_child2', substrs=[
'(DeepData_5) data2.m_child4.m_child2.m_child2 = "Just a test"'])
self.expect(
'fr var data2.m_child4.m_child2.m_child2 --no-summary-depth',
substrs=[
'(DeepData_5) data2.m_child4.m_child2.m_child2 = {',
'm_some_text = "Just a test"',
'}'])
| apple/swift-lldb | packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-skip-summary/TestDataFormatterSkipSummary.py | Python | apache-2.0 | 7,621 |
# -*- python -*-
# Package : omniidl
# typecode.py Created on: 1999/12/2
# Author : David Scott (djs)
#
# Copyright (C) 2003-2012 Apasphere Ltd
# Copyright (C) 1999 AT&T Laboratories Cambridge
#
# This file is part of omniidl.
#
# omniidl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
"""Produces the instances of CORBA::TypeCode"""
from omniidl import idlast, idltype, idlutil
from omniidl_be.cxx import ast, output, util, config, types, id
from omniidl_be.cxx.dynskel import template
import sys
self = sys.modules[__name__]
stream = None
tophalf = None
bottomhalf = None
# For a given type declaration, creates (private) static instances of
# CORBA::TypeCode_ptr for that type, and any necessary for contained
# constructed types. Contained types from other files cannot be used
# because the order of static initialiser execution is not defined.
# eg
# IDL: struct testStruct{
# char a;
# foo b; // b is defined elsewhere
# };
# becomes: static CORBA::PR_StructMember _0RL_structmember_testStruct[] = {
# {"a", CORBA::TypeCode::PR_char_tc()},
# {"b", _0RL_tc_foo} // defined elsewhere
# };
# static CORBA::TypeCode_ptr _0RL_tc_testStruct = .....
#
# Types constructed in the main file have an externally visible symbol
# defined:
# const CORBA::TypeCode_ptr _tc_testStruct = _0RL_tc_testStruct
#
# ----------------------------------------------------------------------
# Utility functions local to this module start here
# ----------------------------------------------------------------------
class NameAlreadyDefined:
def __str__(self):
return "Name has already been defined in this scope/block/file/section"
# returns true if the name has already been defined, and need not be defined
# again.
def alreadyDefined(mangledname):
return self.__defined_names.has_key(mangledname)
def defineName(mangledname):
self.__defined_names[mangledname] = 1
def forwardUsed(node):
sname = idlutil.slashName(node.scopedName())
self.__forwards_pending[sname] = 1
def resolveForward(node):
sname = idlutil.slashName(node.scopedName())
if self.__forwards_pending.has_key(sname):
del self.__forwards_pending[sname]
return 1
return 0
# mangleName("_0RL_tc", ["CORBA", "Object"]) -> "_ORL_tc_CORBA_Object"
def mangleName(prefix, scopedName):
mangled = prefix + id.Name(scopedName).guard()
return mangled
# Note: The AST has a notion of recursive structs and unions, but it can only
# say whether it is recursive, and not tell you how many nodes up the tree
# the recursive definition is. So we keep track of currently being-defined
# nodes here for that purpose.
self.__currentNodes = []
def startingNode(node):
self.__currentNodes.append(node)
def finishingNode():
assert(self.__currentNodes != [])
self.__currentNodes = self.__currentNodes[:-1]
def currently_being_defined(node):
return node in self.__currentNodes
def recursive_Depth(node):
return len(self.__currentNodes) - self.__currentNodes.index(node)
def init(stream):
self.stream = stream
    # declarations are built in two halves; this allows us to keep the
    # same order as the old backend. It could be simpler.
self.tophalf = stream
self.bottomhalf = stream
self.__immediatelyInsideModule = 0
# Dictionary with keys representing names defined. If two structures both
# have a member of type foo, we should still only define the TypeCode for
# foo once.
self.__defined_names = {}
# Dictionary of forward-declared structs/unions that have been
# used in sequence TypeCodes, but not yet defined.
self.__forwards_pending = {}
# Normally when walking over the tree we only consider things
# defined in the current file. However if we encounter a
# dependency between something in the current file and something
# defined elsewhere, we set the resolving_dependency flag and
# recurse again.
self.__resolving_dependency = 0
return self
# Places TypeCode symbol in appropriate namespace with a non-static const
# declaration (performs MSVC workaround)
def external_linkage(decl, mangled_name = ""):
assert isinstance(decl, idlast.DeclRepoId)
# Don't give external linkage if we met this declaration in
# resolving an out-of-file dependency
if self.__resolving_dependency:
return
where = bottomhalf
scopedName = id.Name(decl.scopedName())
scope = scopedName.scope()
tc_name = scopedName.prefix("_tc_")
tc_unscoped_name = tc_name.simple()
tc_name = tc_name.fullyQualify()
if mangled_name == "":
mangled_name = mangleName(config.state['Private Prefix'] + "_tc_",
decl.scopedName())
if alreadyDefined(tc_name):
return
defineName(tc_name)
# Needs the workaround if directly inside a module
if not self.__immediatelyInsideModule:
where.out("""\
const CORBA::TypeCode_ptr @tc_name@ = @mangled_name@;
""",
tc_name = tc_name, mangled_name = mangled_name)
return
open_namespace = ""
close_namespace = ""
for s in scope:
open_namespace = open_namespace + "namespace " + s + " { "
close_namespace = close_namespace + "} "
where.out(template.external_linkage,
open_namespace = open_namespace,
close_namespace = close_namespace,
tc_name = tc_name,
mangled_name = mangled_name,
tc_unscoped_name = tc_unscoped_name)
# Gets a TypeCode instance for a type
# Basic types have new typecodes generated; derived types are assumed
# to already exist, and a name is passed instead.
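# Illustrative examples (added sketch ; assumes the default "_0RL" private
# prefix used in the comments above) :
#   - an IDL "long" maps to "CORBA::TypeCode::PR_long_tc()"
#   - an unbounded IDL "string" maps to
#         "CORBA::TypeCode::PR_string_tc(0, &_0RL_tcTrack)"
#   - a declared type such as a struct Foo yields the mangled name
#     "_0RL_tc_Foo", the static TypeCode_ptr defined elsewhere in this file.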
def mkTypeCode(type, declarator = None, node = None):
assert isinstance(type, types.Type)
prefix = "CORBA::TypeCode::PR_"
tctrack = ", &" + config.state['Private Prefix'] + "_tcTrack"
if declarator:
assert isinstance(declarator, idlast.Declarator)
dims = declarator.sizes()
pre_str = ""
post_str = ""
for dim in dims:
pre_str = pre_str + prefix + "array_tc(" + str(dim) + ", "
post_str = post_str + tctrack + ")"
return pre_str + mkTypeCode(type, None, node) + post_str
type = type.type()
basic = {
idltype.tk_short: "short",
idltype.tk_long: "long",
idltype.tk_ushort: "ushort",
idltype.tk_ulong: "ulong",
idltype.tk_float: "float",
idltype.tk_double: "double",
idltype.tk_boolean: "boolean",
idltype.tk_char: "char",
idltype.tk_wchar: "wchar",
idltype.tk_octet: "octet",
idltype.tk_any: "any",
idltype.tk_TypeCode: "TypeCode",
idltype.tk_longlong: "longlong",
idltype.tk_ulonglong: "ulonglong",
idltype.tk_longdouble: "longdouble"
}
if basic.has_key(type.kind()):
return prefix + basic[type.kind()] + "_tc()"
if isinstance(type, idltype.Base):
util.fatalError("Internal error generating TypeCode data")
if isinstance(type, idltype.String):
return prefix + "string_tc(" + str(type.bound()) + tctrack + ")"
if isinstance(type, idltype.WString):
return prefix + "wstring_tc(" + str(type.bound()) + tctrack + ")"
if isinstance(type, idltype.Sequence):
seqType = type.seqType()
if isinstance(seqType, idltype.Declared):
decl = seqType.decl()
if hasattr(decl, "recursive") and decl.recursive() and \
currently_being_defined(decl):
depth = recursive_Depth(decl)
return prefix + "recursive_sequence_tc(" +\
str(type.bound()) + ", " + str(depth) + tctrack + ")"
startingNode(type)
ret = prefix + "sequence_tc(" + str(type.bound()) + ", " +\
mkTypeCode(types.Type(type.seqType())) + tctrack + ")"
finishingNode()
return ret
if isinstance(type, idltype.Fixed):
return (prefix + "fixed_tc(%d,%d%s)" %
(type.digits(),type.scale(),tctrack))
assert isinstance(type, idltype.Declared)
if type.kind() == idltype.tk_objref:
scopedName = type.decl().scopedName()
if scopedName == ["CORBA", "Object"]:
return prefix + "Object_tc()"
scopedName = id.Name(scopedName)
repoID = type.decl().repoId()
iname = scopedName.simple()
return (prefix + 'interface_tc("' + repoID + '", "' +
iname + '"' + tctrack + ')')
elif type.kind() == idltype.tk_abstract_interface:
scopedName = id.Name(type.decl().scopedName())
repoID = type.decl().repoId()
iname = scopedName.simple()
return (prefix + 'abstract_interface_tc("' + repoID + '", "' +
iname + '"' + tctrack + ')')
elif type.kind() == idltype.tk_local_interface:
scopedName = id.Name(type.decl().scopedName())
repoID = type.decl().repoId()
iname = scopedName.simple()
return (prefix + 'local_interface_tc("' + repoID + '", "' +
iname + '"' + tctrack + ')')
guard_name = id.Name(type.scopedName()).guard()
return config.state['Private Prefix'] + "_tc_" + guard_name
# ---------------------------------------------------------------
# Tree-walking part of module starts here
# ---------------------------------------------------------------
# Control arrives here
#
def visitAST(node):
self.__completedModules = {}
for n in node.declarations():
if ast.shouldGenerateCodeForDecl(n):
n.accept(self)
def visitModule(node):
slash_scopedName = '/'.join(node.scopedName())
if self.__completedModules.has_key(slash_scopedName):
return
self.__completedModules[slash_scopedName] = 1
# This has a bearing on making symbols externally visible/ linkable
insideModule = self.__immediatelyInsideModule
self.__immediatelyInsideModule = 1
for n in node.definitions():
n.accept(self)
# Treat a reopened module as if it had been defined all at once
for c in node.continuations():
slash_scopedName = '/'.join(c.scopedName())
self.__completedModules[slash_scopedName] = 1
for n in c.definitions():
n.accept(self)
self.__immediatelyInsideModule = insideModule
# builds an instance of CORBA::PR_structMember containing pointers
# to all the TypeCodes of the structure members
def buildMembersStructure(node):
struct = output.StringStream()
mangled_name = mangleName(config.state['Private Prefix'] + \
"_structmember_", node.scopedName())
if alreadyDefined(mangled_name):
# no need to regenerate
return struct
defineName(mangled_name)
members = node.members()
array = []
for m in members:
memberType = types.Type(m.memberType())
for d in m.declarators():
this_name = id.Name(d.scopedName()).simple()
typecode = mkTypeCode(memberType, d, node)
array.append( "{\"" + this_name + "\", " + typecode + "}" )
if len(members) > 0:
struct.out("""\
static CORBA::PR_structMember @mangled_name@[] = {
@members@
};""", members = ",\n".join(array), mangled_name = mangled_name)
return struct
# Convenience function to total up the number of members, treating
# declarators separately.
def numMembers(node):
members = node.members()
num = 0
for m in members:
num = num + len(m.declarators())
return num
def visitStruct(node):
startingNode(node)
# the key here is to redirect the bottom half to a buffer
# just for now
oldbottomhalf = self.bottomhalf
self.bottomhalf = output.StringStream()
insideModule = self.__immediatelyInsideModule
self.__immediatelyInsideModule = 0
# create the static typecodes for constructed types by setting
# the resolving_dependency flag and recursing
save_resolving_dependency = self.__resolving_dependency
for child in node.members():
memberType = child.memberType()
if child.constrType():
self.__resolving_dependency = save_resolving_dependency
else:
self.__resolving_dependency = 1
if isinstance(memberType, idltype.Declared):
memberType.decl().accept(self)
elif isinstance(memberType, idltype.Sequence):
# anonymous sequence (maybe sequence<sequence<...<T>>>)
# Find the ultimate base type, and if it's user declared then
# produce a typecode definition for it.
base_type = memberType.seqType()
while isinstance(base_type, idltype.Sequence):
base_type = base_type.seqType()
# if a struct is recursive, don't loop forever :)
if isinstance(base_type, idltype.Declared):
decl = base_type.decl()
if not currently_being_defined(decl):
base_type.decl().accept(self)
self.__resolving_dependency = save_resolving_dependency
tophalf.out(str(buildMembersStructure(node)))
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] + "_tc_",
scopedName)
if not alreadyDefined(mangled_name):
# only define the name once
defineName(mangled_name)
structmember_mangled_name =\
mangleName(config.state['Private Prefix'] + \
"_structmember_", scopedName)
assert alreadyDefined(structmember_mangled_name)
num = numMembers(node)
repoID = node.repoId()
struct_name = id.Name(scopedName).simple()
tophalf.out("""\
#ifdef @mangled_name@
# undef @mangled_name@
#endif
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_struct_tc("@repoID@", "@name@", @structmember_mangled_name@, @n@, &@pprefix@_tcTrack);
""",
mangled_name = mangled_name,
structmember_mangled_name = structmember_mangled_name,
name = struct_name, n = str(num),
repoID = repoID, pprefix=config.state['Private Prefix'])
self.__immediatelyInsideModule = insideModule
external_linkage(node)
# restore the old bottom half
oldbottomhalf.out(str(self.bottomhalf))
self.bottomhalf = oldbottomhalf
finishingNode()
def visitStructForward(node):
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +
"_tc_", scopedName)
fmangled_name = mangleName(config.state['Private Prefix'] +
"_ft_", scopedName)
if not alreadyDefined(fmangled_name):
defineName(fmangled_name)
tophalf.out("""\
static CORBA::TypeCode_ptr @fmangled_name@ = CORBA::TypeCode::PR_forward_tc("@repoId@", &@pprefix@_tcTrack);
#define @mangled_name@ @fmangled_name@
""",
mangled_name = mangled_name,
fmangled_name = fmangled_name,
repoId = node.repoId(),
pprefix=config.state['Private Prefix'])
def visitUnion(node):
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +\
"_tc_", scopedName)
if alreadyDefined(mangled_name):
return
startingNode(node)
# the key here is to redirect the bottom half to a buffer
# just for now
oldbottomhalf = self.bottomhalf
self.bottomhalf = output.StringStream()
insideModule = self.__immediatelyInsideModule
self.__immediatelyInsideModule = 0
# need to build a static array of node members in a similar fashion
# to structs
array = []
switchType = types.Type(node.switchType())
deref_switchType = switchType.deref()
if isinstance(switchType.type(), idltype.Declared):
save_resolving_dependency = self.__resolving_dependency
if not node.constrType():
self.__resolving_dependency = 1
switchType.type().decl().accept(self)
self.__resolving_dependency = save_resolving_dependency
numlabels = 0
numcases = 0
hasDefault = None
for c in node.cases():
numcases = numcases + 1
decl = c.declarator()
caseType = types.Type(c.caseType())
save_resolving_dependency = self.__resolving_dependency
if not c.constrType():
self.__resolving_dependency = 1
if isinstance(caseType.type(), idltype.Declared):
caseType.type().decl().accept(self)
elif caseType.sequence():
# anonymous sequence
seqType = caseType.type().seqType()
while isinstance(seqType, idltype.Sequence):
seqType = seqType.seqType()
if isinstance(seqType, idltype.Declared):
# don't loop forever
if not currently_being_defined(seqType.decl()):
seqType.decl().accept(self)
self.__resolving_dependency = save_resolving_dependency
typecode = mkTypeCode(caseType, decl, node)
case_name = id.Name(decl.scopedName()).simple()
for l in c.labels():
if l.default():
label = "0"
hasDefault = numlabels
else:
label = switchType.literal(l.value())
array.append('{"%s", %s, (CORBA::PR_unionDiscriminator)%s}' %
(case_name, typecode, label))
numlabels = numlabels + 1
discrim_tc = mkTypeCode(deref_switchType)
repoID = node.repoId()
union_name = id.Name(scopedName).simple()
unionmember_mangled_name = mangleName(config.state['Private Prefix'] +
"_unionMember_", scopedName)
default_str = ""
if hasDefault is None:
default_str = "-1"
else:
default_str = str(hasDefault)
tophalf.out("""\
static CORBA::PR_unionMember @unionmember_mangled_name@[] = {
@members@
};
#ifdef @mangled_name@
# undef @mangled_name@
#endif
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_union_tc("@repoID@", "@name@", @discrim_tc@, @unionmember_mangled_name@, @labels@, @default_str@, &@pprefix@_tcTrack);""",
mangled_name = mangled_name,
repoID = repoID,
discrim_tc = discrim_tc,
unionmember_mangled_name = unionmember_mangled_name,
name = union_name,
labels = str(numlabels),
default_str = default_str,
members = ",\n".join(array),
pprefix = config.state['Private Prefix'])
defineName(unionmember_mangled_name)
defineName(mangled_name)
self.__immediatelyInsideModule = insideModule
external_linkage(node)
# restore the old bottom half
oldbottomhalf.out(str(self.bottomhalf))
self.bottomhalf = oldbottomhalf
finishingNode()
def visitUnionForward(node):
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +
"_tc_", scopedName)
fmangled_name = mangleName(config.state['Private Prefix'] +
"_ft_", scopedName)
if not alreadyDefined(fmangled_name):
defineName(fmangled_name)
tophalf.out("""\
static CORBA::TypeCode_ptr @fmangled_name@ = CORBA::TypeCode::PR_forward_tc("@repoId@", &@pprefix@_tcTrack);
#define @mangled_name@ @fmangled_name@
""",
mangled_name = mangled_name,
fmangled_name = fmangled_name,
repoId = node.repoId(),
pprefix=config.state['Private Prefix'])
def visitEnum(node):
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +\
"_tc_", scopedName)
if alreadyDefined(mangled_name):
return
enumerators = node.enumerators()
names = []
for enumerator in enumerators:
names.append('"' + id.Name(enumerator.scopedName()).simple(cxx=0) +
'"')
enum_name = id.Name(scopedName).simple()
repoID = node.repoId()
tc_name = id.Name(scopedName).prefix("_tc_").fullyQualify()
enummember_mangled_name = mangleName(config.state['Private Prefix'] +
"_enumMember_", scopedName)
tophalf.out("""\
static const char* @enummember_mangled_name@[] = { @elements@ };
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_enum_tc("@repoID@", "@name@", @enummember_mangled_name@, @numcases@, &@pprefix@_tcTrack);""",
enummember_mangled_name = enummember_mangled_name,
mangled_name = mangled_name,
elements = ", ".join(names),
repoID = repoID,
name = enum_name,
numcases = str(len(names)),
pprefix = config.state['Private Prefix'])
defineName(mangled_name)
defineName(enummember_mangled_name)
external_linkage(node)
def visitForward(node):
pass
def visitInterface(node):
if node.builtIn(): return
    # interfaces containing members whose type is the interface itself
    # cause a minor (non-fatal) problem with the ordering of the output
    # declarations. This check only serves to correct this cosmetic flaw
    # and make the output of the new system identical to the old one.
if hasattr(node, "typecode_already_been_here"):
return
node.typecode_already_been_here = 1
startingNode(node)
insideModule = self.__immediatelyInsideModule
self.__immediatelyInsideModule = 0
for n in node.declarations():
n.accept(self)
self.__immediatelyInsideModule = insideModule
repoID = node.repoId()
iname = id.Name(node.scopedName()).simple()
if node.abstract():
func = "PR_abstract_interface_tc"
elif node.local():
func = "PR_local_interface_tc"
else:
func = "PR_interface_tc"
typecode = 'CORBA::TypeCode::' + func + '("' + repoID + '", "' +\
iname + '", &' + config.state['Private Prefix'] + '_tcTrack)'
external_linkage(node, typecode)
finishingNode()
def recurse(type, constr=0):
assert isinstance(type, types.Type)
deref_type = type.deref()
if isinstance(type.type(), idltype.Declared):
base_decl = type.type().decl()
save_resolving_dependency = self.__resolving_dependency
if not constr:
self.__resolving_dependency = 1
base_decl.accept(self)
self.__resolving_dependency = save_resolving_dependency
elif deref_type.sequence():
seqType = deref_type.type().seqType()
if isinstance(seqType, idltype.Declared):
base_decl = seqType.decl()
save_resolving_dependency = self.__resolving_dependency
self.__resolving_dependency = 1
base_decl.accept(self)
self.__resolving_dependency = save_resolving_dependency
elif types.Type(seqType).sequence():
# anonymous sequence
recurse(types.Type(seqType.seqType()))
def visitDeclarator(declarator):
# this must be a typedef declarator
node = declarator.alias()
aliasType = types.Type(node.aliasType())
recurse(aliasType)
scopedName = declarator.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +
"_tc_", scopedName)
if alreadyDefined(mangled_name):
return
repoID = declarator.repoId()
typecode = mkTypeCode(aliasType, declarator)
scopedName = declarator.scopedName()
typedef_name = id.Name(scopedName).simple()
tophalf.out("""\
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_alias_tc("@repoID@", "@name@", @typecode@, &@pprefix@_tcTrack);
""",
mangled_name = mangled_name,
repoID = repoID,
name = typedef_name,
typecode = typecode,
pprefix = config.state['Private Prefix'])
defineName(mangled_name)
external_linkage(declarator)
def visitTypedef(node):
aliasType = types.Type(node.aliasType())
recurse(aliasType, node.constrType())
for declarator in node.declarators():
declarator.accept(self)
def visitConst(node):
pass
def visitException(node):
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +
"_tc_", scopedName)
if alreadyDefined(mangled_name):
return
defineName(mangled_name)
startingNode(node)
# the key here is to redirect the bottom half to a buffer
# just for now
oldbottomhalf = self.bottomhalf
self.bottomhalf = output.StringStream()
# create the static typecodes for constructed types by setting
# the resolving_dependency flag and recursing
save_resolving_dependency = self.__resolving_dependency
insideModule = self.__immediatelyInsideModule
self.__immediatelyInsideModule = 0
for child in node.members():
memberType = child.memberType()
if child.constrType():
self.__resolving_dependency = save_resolving_dependency
else:
self.__resolving_dependency = 1
if isinstance(memberType, idltype.Declared):
memberType.decl().accept(self)
self.__resolving_dependency = save_resolving_dependency
self.__immediatelyInsideModule = insideModule
# output the structure of members
tophalf.out(str(buildMembersStructure(node)))
num = numMembers(node)
repoID = node.repoId()
ex_name = id.Name(scopedName).simple()
structmember_mangled_name = mangleName(config.state['Private Prefix'] + \
"_structmember_", scopedName)
if num == 0:
structmember_mangled_name = "(CORBA::PR_structMember*) 0"
tophalf.out("""\
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_exception_tc("@repoID@", "@name@", @structmember_mangled_name@, @n@, &@pprefix@_tcTrack);""",
mangled_name = mangled_name,
name = ex_name, n = str(num),
structmember_mangled_name = structmember_mangled_name,
repoID = repoID,
pprefix = config.state['Private Prefix'])
external_linkage(node)
# restore the old bottom half
oldbottomhalf.out(str(self.bottomhalf))
self.bottomhalf = oldbottomhalf
finishingNode()
# builds an instance of CORBA::PR_valueMember containing pointers
# to all the TypeCodes of the value statemembers
def buildStateMembersStructure(node):
struct = output.StringStream()
mangled_name = mangleName(config.state['Private Prefix'] + \
"_valuemember_", node.scopedName())
if alreadyDefined(mangled_name):
# no need to regenerate
return struct
defineName(mangled_name)
members = node.statemembers()
array = []
if members:
for m in members:
memberType = types.Type(m.memberType())
access = m.memberAccess()
for d in m.declarators():
this_name = id.Name(d.scopedName()).simple()
typecode = mkTypeCode(memberType, d, node)
array.append('{"%s", %s, %d}' % (this_name, typecode, access))
struct.out("""\
static CORBA::PR_valueMember @mangled_name@[] = {
@members@
};""", members = ",\n".join(array), mangled_name = mangled_name)
else:
struct.out("""\
static CORBA::PR_valueMember* @mangled_name@ = 0;""",
mangled_name=mangled_name)
return struct
# Convenience function to total up the number of members, treating
# declarators separately.
def numStateMembers(node):
members = node.statemembers()
num = 0
for m in members:
num = num + len(m.declarators())
return num
def visitValue(node):
# Used for abstract value too
startingNode(node)
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +
"_tc_", scopedName)
visitValueForward(node)
# the key here is to redirect the bottom half to a buffer
# just for now
oldbottomhalf = self.bottomhalf
self.bottomhalf = output.StringStream()
insideModule = self.__immediatelyInsideModule
self.__immediatelyInsideModule = 0
# handle nested types
for n in node.declarations():
n.accept(self)
# create the static typecodes for constructed types by setting
# the resolving_dependency flag and recursing
save_resolving_dependency = self.__resolving_dependency
for child in node.statemembers():
memberType = child.memberType()
if child.constrType():
self.__resolving_dependency = save_resolving_dependency
else:
self.__resolving_dependency = 1
if isinstance(memberType, idltype.Declared):
decl = memberType.decl()
if not currently_being_defined(decl):
decl.accept(self)
elif isinstance(memberType, idltype.Sequence):
# anonymous sequence (maybe sequence<sequence<...<T>>>)
# Find the ultimate base type, and if it's user declared then
# produce a typecode definition for it.
base_type = memberType.seqType()
while isinstance(base_type, idltype.Sequence):
base_type = base_type.seqType()
if isinstance(base_type, idltype.Declared):
decl = base_type.decl()
if not currently_being_defined(decl):
decl.accept(self)
self.__resolving_dependency = save_resolving_dependency
tophalf.out(str(buildStateMembersStructure(node)))
if not alreadyDefined(mangled_name):
# only define the name once
defineName(mangled_name)
valuemember_mangled_name = mangleName(config.state['Private Prefix'] +
"_valuemember_", scopedName)
assert alreadyDefined(valuemember_mangled_name)
num = numStateMembers(node)
repoID = node.repoId()
value_name = id.Name(scopedName).simple()
# Value modifiers
modifierl = []
if isinstance(node, idlast.Value):
if node.custom():
modifierl.append("CORBA::VM_CUSTOM")
if node.truncatable():
modifierl.append("CORBA::VM_TRUNCATABLE")
else:
assert isinstance(node, idlast.ValueAbs)
modifierl.append("CORBA::VM_ABSTRACT")
if modifierl:
modifiers = "|".join(modifierl)
else:
modifiers = "CORBA::VM_NONE"
# Concrete base
inherits = node.inherits()
if (isinstance(node, idlast.Value) and
inherits and isinstance(inherits[0], idlast.Value)):
visitValueForward(inherits[0])
bscopedName = inherits[0].scopedName()
concrete_base = mangleName(config.state['Private Prefix'] +
"_tc_", bscopedName)
else:
concrete_base = "CORBA::TypeCode::PR_null_tc()"
tophalf.out("""\
#ifdef @mangled_name@
# undef @mangled_name@
#endif
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_value_tc("@repoID@", "@name@", @modifiers@, @concrete_base@, @valuemember_mangled_name@, @n@, &@pprefix@_tcTrack);
""",
mangled_name = mangled_name,
modifiers = modifiers,
concrete_base = concrete_base,
valuemember_mangled_name = valuemember_mangled_name,
name = value_name, n = str(num),
repoID = repoID, pprefix=config.state['Private Prefix'])
self.__immediatelyInsideModule = insideModule
external_linkage(node)
# restore the old bottom half
oldbottomhalf.out(str(self.bottomhalf))
self.bottomhalf = oldbottomhalf
finishingNode()
def visitValueForward(node):
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +
"_tc_", scopedName)
fmangled_name = mangleName(config.state['Private Prefix'] +
"_ft_", scopedName)
if not alreadyDefined(fmangled_name):
defineName(fmangled_name)
tophalf.out("""\
static CORBA::TypeCode_ptr @fmangled_name@ = CORBA::TypeCode::PR_forward_tc("@repoId@", &@pprefix@_tcTrack);
#define @mangled_name@ @fmangled_name@
""",
mangled_name = mangled_name,
fmangled_name = fmangled_name,
repoId = node.repoId(),
pprefix=config.state['Private Prefix'])
def visitValueAbs(node):
visitValue(node)
def visitValueBox(node):
boxedType = types.Type(node.boxedType())
recurse(boxedType, node.constrType())
scopedName = node.scopedName()
mangled_name = mangleName(config.state['Private Prefix'] +\
"_tc_", scopedName)
if alreadyDefined(mangled_name):
return
repoID = node.repoId()
typecode = mkTypeCode(boxedType)
scopedName = node.scopedName()
boxed_name = id.Name(scopedName).simple()
tophalf.out("""\
static CORBA::TypeCode_ptr @mangled_name@ = CORBA::TypeCode::PR_value_box_tc("@repoID@", "@name@", @typecode@, &@pprefix@_tcTrack);
""",
mangled_name = mangled_name,
repoID = repoID,
name = boxed_name,
typecode = typecode,
pprefix = config.state['Private Prefix'])
defineName(mangled_name)
external_linkage(node)
| yeewang/omniORB | src/lib/omniORB/python/omniidl_be/cxx/dynskel/typecode.py | Python | gpl-2.0 | 35,087 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import time
from swift import gettext_ as _
from swift import __version__ as swiftver
from swift.common.storage_policy import POLICIES
from swift.common.swob import Request, Response
from swift.common.utils import get_logger, config_true_value, json, \
SWIFT_CONF_FILE
from swift.common.constraints import check_mount
from resource import getpagesize
from hashlib import md5
class ReconMiddleware(object):
"""
Recon middleware used for monitoring.
/recon/load|mem|async... will return various system metrics.
Needs to be added to the pipeline and requires a filter
declaration in the object-server.conf:
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
"""
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.devices = conf.get('devices', '/srv/node')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.logger = get_logger(conf, log_route='recon')
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.object_recon_cache = os.path.join(self.recon_cache_path,
'object.recon')
self.container_recon_cache = os.path.join(self.recon_cache_path,
'container.recon')
self.account_recon_cache = os.path.join(self.recon_cache_path,
'account.recon')
self.drive_recon_cache = os.path.join(self.recon_cache_path,
'drive.recon')
self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.rings = [self.account_ring_path, self.container_ring_path]
# include all object ring files (for all policies)
for policy in POLICIES:
self.rings.append(os.path.join(swift_dir,
policy.ring_name + '.ring.gz'))
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
def _from_recon_cache(self, cache_keys, cache_file, openr=open):
"""retrieve values from a recon cache file
        :param cache_keys: list of cache items to retrieve
        :param cache_file: cache file to retrieve items from.
        :param openr: open to use [for unittests]
        :return: dict of cache items and their values or None if not found
"""
try:
with openr(cache_file, 'r') as f:
recondata = json.load(f)
return dict((key, recondata.get(key)) for key in cache_keys)
except IOError:
self.logger.exception(_('Error reading recon cache file'))
except ValueError:
self.logger.exception(_('Error parsing recon cache file'))
except Exception:
self.logger.exception(_('Error retrieving recon data'))
return dict((key, None) for key in cache_keys)
def get_version(self):
"""get swift version"""
verinfo = {'version': swiftver}
return verinfo
def get_mounted(self, openr=open):
"""get ALL mounted fs from /proc/mounts"""
mounts = []
with openr('/proc/mounts', 'r') as procmounts:
for line in procmounts:
mount = {}
mount['device'], mount['path'], opt1, opt2, opt3, \
opt4 = line.rstrip().split()
mounts.append(mount)
return mounts
def get_load(self, openr=open):
"""get info from /proc/loadavg"""
loadavg = {}
with openr('/proc/loadavg', 'r') as f:
onemin, fivemin, ftmin, tasks, procs = f.read().rstrip().split()
loadavg['1m'] = float(onemin)
loadavg['5m'] = float(fivemin)
loadavg['15m'] = float(ftmin)
loadavg['tasks'] = tasks
loadavg['processes'] = int(procs)
return loadavg
def get_mem(self, openr=open):
"""get info from /proc/meminfo"""
meminfo = {}
with openr('/proc/meminfo', 'r') as memlines:
for i in memlines:
entry = i.rstrip().split(":")
meminfo[entry[0]] = entry[1].strip()
return meminfo
def get_async_info(self):
"""get # of async pendings"""
return self._from_recon_cache(['async_pending'],
self.object_recon_cache)
def get_driveaudit_error(self):
"""get # of drive audit errors"""
return self._from_recon_cache(['drive_audit_errors'],
self.drive_recon_cache)
def get_replication_info(self, recon_type):
"""get replication info"""
replication_list = ['replication_time',
'replication_stats',
'replication_last']
if recon_type == 'account':
return self._from_recon_cache(replication_list,
self.account_recon_cache)
elif recon_type == 'container':
return self._from_recon_cache(replication_list,
self.container_recon_cache)
elif recon_type == 'object':
replication_list += ['object_replication_time',
'object_replication_last']
return self._from_recon_cache(replication_list,
self.object_recon_cache)
else:
return None
def get_device_info(self):
"""get devices"""
try:
return {self.devices: os.listdir(self.devices)}
except Exception:
self.logger.exception(_('Error listing devices'))
return {self.devices: None}
def get_updater_info(self, recon_type):
"""get updater info"""
if recon_type == 'container':
return self._from_recon_cache(['container_updater_sweep'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_updater_sweep'],
self.object_recon_cache)
else:
return None
def get_expirer_info(self, recon_type):
"""get expirer info"""
if recon_type == 'object':
return self._from_recon_cache(['object_expiration_pass',
'expired_last_pass'],
self.object_recon_cache)
def get_auditor_info(self, recon_type):
"""get auditor info"""
if recon_type == 'account':
return self._from_recon_cache(['account_audits_passed',
'account_auditor_pass_completed',
'account_audits_since',
'account_audits_failed'],
self.account_recon_cache)
elif recon_type == 'container':
return self._from_recon_cache(['container_audits_passed',
'container_auditor_pass_completed',
'container_audits_since',
'container_audits_failed'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
self.object_recon_cache)
else:
return None
def get_unmounted(self):
"""list unmounted (failed?) devices"""
mountlist = []
for entry in os.listdir(self.devices):
try:
mounted = check_mount(self.devices, entry)
except OSError as err:
mounted = str(err)
mpoint = {'device': entry, 'mounted': mounted}
if mpoint['mounted'] is not True:
mountlist.append(mpoint)
return mountlist
def get_diskusage(self):
"""get disk utilization statistics"""
devices = []
for entry in os.listdir(self.devices):
try:
mounted = check_mount(self.devices, entry)
except OSError as err:
devices.append({'device': entry, 'mounted': str(err),
'size': '', 'used': '', 'avail': ''})
continue
if mounted:
path = os.path.join(self.devices, entry)
disk = os.statvfs(path)
capacity = disk.f_bsize * disk.f_blocks
available = disk.f_bsize * disk.f_bavail
used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
devices.append({'device': entry, 'mounted': True,
'size': capacity, 'used': used,
'avail': available})
else:
devices.append({'device': entry, 'mounted': False,
'size': '', 'used': '', 'avail': ''})
return devices
def get_ring_md5(self, openr=open):
"""get all ring md5sum's"""
sums = {}
for ringfile in self.rings:
md5sum = md5()
if os.path.exists(ringfile):
try:
with openr(ringfile, 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
sums[ringfile] = md5sum.hexdigest()
except IOError as err:
sums[ringfile] = None
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading ringfile'))
return sums
def get_swift_conf_md5(self, openr=open):
"""get md5 of swift.conf"""
md5sum = md5()
try:
with openr(SWIFT_CONF_FILE, 'r') as fh:
chunk = fh.read(4096)
while chunk:
md5sum.update(chunk)
chunk = fh.read(4096)
except IOError as err:
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading swift.conf'))
hexsum = None
else:
hexsum = md5sum.hexdigest()
return {SWIFT_CONF_FILE: hexsum}
def get_quarantine_count(self):
"""get obj/container/account quarantine counts"""
qcounts = {"objects": 0, "containers": 0, "accounts": 0,
"policies": {}}
qdir = "quarantined"
for device in os.listdir(self.devices):
qpath = os.path.join(self.devices, device, qdir)
if os.path.exists(qpath):
for qtype in os.listdir(qpath):
qtgt = os.path.join(qpath, qtype)
linkcount = os.lstat(qtgt).st_nlink
if linkcount > 2:
if qtype.startswith('objects'):
if '-' in qtype:
pkey = qtype.split('-', 1)[1]
else:
pkey = '0'
qcounts['policies'].setdefault(pkey,
{'objects': 0})
qcounts['policies'][pkey]['objects'] \
+= linkcount - 2
qcounts['objects'] += linkcount - 2
else:
qcounts[qtype] += linkcount - 2
return qcounts
def get_socket_info(self, openr=open):
"""
get info from /proc/net/sockstat and sockstat6
Note: The mem value is actually kernel pages, but we return bytes
allocated based on the systems page size.
"""
sockstat = {}
try:
with openr('/proc/net/sockstat', 'r') as proc_sockstat:
for entry in proc_sockstat:
if entry.startswith("TCP: inuse"):
tcpstats = entry.split()
sockstat['tcp_in_use'] = int(tcpstats[2])
sockstat['orphan'] = int(tcpstats[4])
sockstat['time_wait'] = int(tcpstats[6])
sockstat['tcp_mem_allocated_bytes'] = \
int(tcpstats[10]) * getpagesize()
except IOError as e:
if e.errno != errno.ENOENT:
raise
try:
with openr('/proc/net/sockstat6', 'r') as proc_sockstat6:
for entry in proc_sockstat6:
if entry.startswith("TCP6: inuse"):
sockstat['tcp6_in_use'] = int(entry.split()[2])
except IOError as e:
if e.errno != errno.ENOENT:
raise
return sockstat
def get_time(self):
"""get current time"""
return time.time()
def GET(self, req):
root, rcheck, rtype = req.split_path(1, 3, True)
all_rtypes = ['account', 'container', 'object']
if rcheck == "mem":
content = self.get_mem()
elif rcheck == "load":
content = self.get_load()
elif rcheck == "async":
content = self.get_async_info()
elif rcheck == 'replication' and rtype in all_rtypes:
content = self.get_replication_info(rtype)
elif rcheck == 'replication' and rtype is None:
# handle old style object replication requests
content = self.get_replication_info('object')
elif rcheck == "devices":
content = self.get_device_info()
elif rcheck == "updater" and rtype in ['container', 'object']:
content = self.get_updater_info(rtype)
elif rcheck == "auditor" and rtype in all_rtypes:
content = self.get_auditor_info(rtype)
elif rcheck == "expirer" and rtype == 'object':
content = self.get_expirer_info(rtype)
elif rcheck == "mounted":
content = self.get_mounted()
elif rcheck == "unmounted":
content = self.get_unmounted()
elif rcheck == "diskusage":
content = self.get_diskusage()
elif rcheck == "ringmd5":
content = self.get_ring_md5()
elif rcheck == "swiftconfmd5":
content = self.get_swift_conf_md5()
elif rcheck == "quarantined":
content = self.get_quarantine_count()
elif rcheck == "sockstat":
content = self.get_socket_info()
elif rcheck == "version":
content = self.get_version()
elif rcheck == "driveaudit":
content = self.get_driveaudit_error()
elif rcheck == "time":
content = self.get_time()
else:
content = "Invalid path: %s" % req.path
return Response(request=req, status="404 Not Found",
body=content, content_type="text/plain")
if content is not None:
return Response(request=req, body=json.dumps(content),
content_type="application/json")
else:
return Response(request=req, status="500 Server Error",
body="Internal server error.",
content_type="text/plain")
def __call__(self, env, start_response):
req = Request(env)
if req.path.startswith('/recon/'):
return self.GET(req)(env, start_response)
else:
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def recon_filter(app):
return ReconMiddleware(app, conf)
return recon_filter
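# Illustrative usage sketch (not part of the original module).  In a real
# cluster the object server's paste pipeline performs this wiring via a
# [filter:recon] section; the conf values and downstream app below are
# placeholder assumptions.
#
#   recon_wrapped = filter_factory({'devices': '/srv/node'})(object_server_app)
#   # GET /recon/version, /recon/diskusage, /recon/quarantined, ... are
#   # answered by ReconMiddleware.GET(); any other path falls through to
#   # the wrapped object_server_app.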
| mjwtom/swift | swift/common/middleware/recon.py | Python | apache-2.0 | 16,726 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations(object):
"""ExpressRouteCrossConnectionPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCrossConnectionPeeringList"]
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnectionPeering"
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCrossConnectionPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnectionPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCrossConnectionPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCrossConnectionPeering"]
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
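# Illustrative usage sketch (not part of the generated file).  The operations
# group is normally reached through the generated NetworkManagementClient;
# the credential, subscription id, resource names and the blocking .result()
# call below are example assumptions.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   ops = client.express_route_cross_connection_peerings
#   for peering in ops.list("my-rg", "my-cross-connection"):
#       print(peering.name)
#   ops.begin_delete("my-rg", "my-cross-connection", "AzurePrivatePeering").result()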
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/operations/_express_route_cross_connection_peerings_operations.py | Python | mit | 22,763 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Ripyl protocol decode library
can.py test suite
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import unittest
import random
import ripyl.protocol.can as can
import ripyl.streaming as stream
import test.test_support as tsup
def gen_random_can_frame():
use_extended = random.choice((True, False))
if use_extended:
can_id = random.randrange(0, 2**29)
else:
can_id = random.randrange(0, 2**11)
data_count = random.randint(0,8)
data = [random.randint(0,0xFF) for b in xrange(data_count)]
ack_bit = random.choice((True, False))
if use_extended:
cf = can.CANExtendedFrame(can_id, data, ack=ack_bit)
else:
cf = can.CANStandardFrame(can_id, data, ack=ack_bit)
return cf
class TestCANFuncs(tsup.RandomSeededTestCase):
def test_can_decode(self):
self.test_name = 'CAN frame'
self.trial_count = 100
for i in xrange(self.trial_count):
self.update_progress(i+1)
frame_count = random.randint(3, 6)
frames = []
data_frame_count = 0
for i in xrange(frame_count):
use_ovl = random.random() < 0.1
if use_ovl:
flag_bits = random.randint(6, 12)
frames.append(can.CANOverloadFrame(flag_bits, ifs_bits=0))
else: # data and remote frames
frames.append(gen_random_can_frame())
data_frame_count += 1
use_err = random.random() < 0.1
if use_err:
flag_bits = random.randint(6, 12)
frames.append(can.CANErrorFrame(flag_bits, ifs_bits=0))
frames[-2].trim_bits = random.randint(1,5)
if data_frame_count < 3:
# Generate additional data and remote frames to help ensure there are enough
# edges for auto rate detection.
for i in xrange(3 - data_frame_count):
frames.append(gen_random_can_frame())
clock_freq = random.randint(10e3, 1e6)
ch, cl = can.can_synth(frames, clock_freq, idle_start=1.0e-5)
records = list(can.can_decode(cl, stream_type=stream.StreamType.Edges))
self.assertEqual(len(records), len(frames), 'Decoded frame count mismatch: {} -> {}'.format(len(frames), len(records)))
for r, o in zip(records, frames):
if r.data != o:
print('Mismatch:\n {}\n {}'.format(o, r.data))
print('### original frames:', clock_freq)
for o in frames:
print(' ', repr(o))
print('### decoded frames:')
for r in records:
print(' ', repr(r.data))
self.assertEqual(r.data, o, 'Frames are different')
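# Illustrative sketch (not part of the original test suite): synthesize a few
# standard CAN frames and decode them back, mirroring what the test above
# exercises.  The arbitration ids, payloads and bit rate are arbitrary values;
# several frames are used so the decoder's automatic rate detection has
# enough edges to work with.
if __name__ == '__main__':
    example_frames = [can.CANStandardFrame(0x40 + i, [0xDE, 0xAD, i], ack=True)
                      for i in range(3)]
    ch, cl = can.can_synth(example_frames, 500.0e3, idle_start=1.0e-5)
    decoded = list(can.can_decode(cl, stream_type=stream.StreamType.Edges))
    print('decoded {} of {} frames'.format(len(decoded), len(example_frames)))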
| kevinpt/ripyl | test/test_can.py | Python | lgpl-3.0 | 3,681 |
from twisted.internet import protocol
from twisted.python import log
class PokerBotProcess(protocol.ProcessProtocol):
def __init__(self, game_key, bot, on_connect, logger):
self.game = game_key
self.bot = bot
self.on_connect = on_connect
self.logger = logger
def connectionMade(self):
self.on_connect()
def outReceived(self, data):
self.connection.tell_server(data)
def errReceived(self, data):
for line in data.split("\n"):
self.logger.debug(line)
def login(self):
print(" logging in...")
return "!login {} {}".format(self.game, self.bot.key)
def tell(self, line):
try:
self.transport.write(line + "\n")
except:
log.err()
def kill(self):
self.transport.signalProcess('KILL')
self.transport.loseConnection()
self.logger.done()
def register(self, connection):
print("Connection registered")
self.connection = connection
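# Illustrative sketch (not part of the original module): wiring a
# PokerBotProcess into the Twisted reactor.  The bot object, game key,
# logger and bot command line below are stand-ins; the real ones come from
# the surrounding pipes framework.
if __name__ == '__main__':
    from collections import namedtuple
    from twisted.internet import reactor

    Bot = namedtuple('Bot', 'key')

    class StubLogger(object):
        def debug(self, line):
            print(line)

        def done(self):
            pass

    proto = PokerBotProcess('game-key', Bot(key='bot-key'),
                            on_connect=lambda: None, logger=StubLogger())
    # argv[0] is conventionally the program name for spawnProcess.
    reactor.spawnProcess(proto, '/usr/bin/python', ['python', 'my_bot.py'])
    reactor.run()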
| gnmerritt/poker-plumbing | pipes/processes.py | Python | mit | 1,025 |
"""Let's Encrypt client crypto utility functions
.. todo:: Make the transition to use PSS rather than PKCS1_v1_5 when the server
is capable of handling the signatures.
"""
import logging
import os
import time
import Crypto.Hash.SHA256
import Crypto.PublicKey.RSA
import Crypto.Signature.PKCS1_v1_5
import M2Crypto
from letsencrypt.client import le_util
# High level functions
def init_save_key(key_size, key_dir, keyname="key-letsencrypt.pem"):
"""Initializes and saves a privkey.
Inits key and saves it in PEM format on the filesystem.
    .. note:: keyname is the attempted filename; the actual name may differ
        if a file already exists at the path.
:param int key_size: RSA key size in bits
:param str key_dir: Key save directory.
:param str keyname: Filename of key
:returns: Key
:rtype: :class:`letsencrypt.client.le_util.Key`
:raises ValueError: If unable to generate the key given key_size.
"""
try:
key_pem = make_key(key_size)
except ValueError as err:
logging.fatal(str(err))
raise err
# Save file
le_util.make_or_verify_dir(key_dir, 0o700, os.geteuid())
key_f, key_path = le_util.unique_file(
os.path.join(key_dir, keyname), 0o600)
key_f.write(key_pem)
key_f.close()
logging.info("Generating key (%d bits): %s", key_size, key_path)
return le_util.Key(key_path, key_pem)
def init_save_csr(privkey, names, cert_dir, csrname="csr-letsencrypt.pem"):
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`letsencrypt.client.le_util.Key`
:param set names: `str` names to include in the CSR
:param str cert_dir: Certificate save directory.
:returns: CSR
:rtype: :class:`letsencrypt.client.le_util.CSR`
"""
csr_pem, csr_der = make_csr(privkey.pem, names)
# Save CSR
le_util.make_or_verify_dir(cert_dir, 0o755)
csr_f, csr_filename = le_util.unique_file(
os.path.join(cert_dir, csrname), 0o644)
csr_f.write(csr_pem)
csr_f.close()
logging.info("Creating CSR: %s", csr_filename)
return le_util.CSR(csr_filename, csr_der, "der")
# Lower level functions
def make_csr(key_str, domains):
"""Generate a CSR.
:param str key_str: RSA key.
:param list domains: Domains included in the certificate.
:returns: new CSR in PEM and DER form containing all domains
:rtype: tuple
"""
assert domains, "Must provide one or more hostnames for the CSR."
rsa_key = M2Crypto.RSA.load_key_string(key_str)
pubkey = M2Crypto.EVP.PKey()
pubkey.assign_rsa(rsa_key)
csr = M2Crypto.X509.Request()
csr.set_pubkey(pubkey)
name = csr.get_subject()
name.C = "US"
name.ST = "Michigan"
name.L = "Ann Arbor"
name.O = "EFF"
name.OU = "University of Michigan"
name.CN = domains[0]
extstack = M2Crypto.X509.X509_Extension_Stack()
ext = M2Crypto.X509.new_extension(
"subjectAltName", ", ".join("DNS:%s" % d for d in domains))
extstack.push(ext)
csr.add_extensions(extstack)
csr.sign(pubkey, "sha256")
assert csr.verify(pubkey)
pubkey2 = csr.get_pubkey()
assert csr.verify(pubkey2)
return csr.as_pem(), csr.as_der()
# WARNING: the csr and private key file are possible attack vectors for TOCTOU
# We should either...
# A. Do more checks to verify that the CSR is trusted/valid
# B. Audit the parsing code for vulnerabilities
def valid_csr(csr):
"""Validate CSR.
Check if `csr` is a valid CSR for the given domains.
:param str csr: CSR in PEM.
:returns: Validity of CSR.
:rtype: bool
"""
try:
csr_obj = M2Crypto.X509.load_request_string(csr)
return bool(csr_obj.verify(csr_obj.get_pubkey()))
except M2Crypto.X509.X509Error:
return False
def csr_matches_pubkey(csr, privkey):
"""Does private key correspond to the subject public key in the CSR?
:param str csr: CSR in PEM.
:param str privkey: Private key file contents
:returns: Correspondence of private key to CSR subject public key.
:rtype: bool
"""
csr_obj = M2Crypto.X509.load_request_string(csr)
privkey_obj = M2Crypto.RSA.load_key_string(privkey)
return csr_obj.get_pubkey().get_rsa().pub() == privkey_obj.pub()
def make_key(bits):
"""Generate PEM encoded RSA key.
:param int bits: Number of bits, at least 1024.
:returns: new RSA key in PEM form with specified number of bits
:rtype: str
"""
return Crypto.PublicKey.RSA.generate(bits).exportKey(format="PEM")
def valid_privkey(privkey):
"""Is valid RSA private key?
:param str privkey: Private key file contents
:returns: Validity of private key.
:rtype: bool
"""
try:
return bool(M2Crypto.RSA.load_key_string(privkey).check_key())
except M2Crypto.RSA.RSAError:
return False
def make_ss_cert(key_str, domains, not_before=None,
validity=(7 * 24 * 60 * 60)):
"""Returns new self-signed cert in PEM form.
Uses key_str and contains all domains.
"""
assert domains, "Must provide one or more hostnames for the CSR."
rsa_key = M2Crypto.RSA.load_key_string(key_str)
pubkey = M2Crypto.EVP.PKey()
pubkey.assign_rsa(rsa_key)
cert = M2Crypto.X509.X509()
cert.set_pubkey(pubkey)
cert.set_serial_number(1337)
cert.set_version(2)
current_ts = long(time.time() if not_before is None else not_before)
current = M2Crypto.ASN1.ASN1_UTCTIME()
current.set_time(current_ts)
expire = M2Crypto.ASN1.ASN1_UTCTIME()
expire.set_time(current_ts + validity)
cert.set_not_before(current)
cert.set_not_after(expire)
subject = cert.get_subject()
subject.C = "US"
subject.ST = "Michigan"
subject.L = "Ann Arbor"
subject.O = "University of Michigan and the EFF"
subject.CN = domains[0]
cert.set_issuer(cert.get_subject())
if len(domains) > 1:
cert.add_ext(M2Crypto.X509.new_extension(
"basicConstraints", "CA:FALSE"))
cert.add_ext(M2Crypto.X509.new_extension(
"subjectAltName", ", ".join(["DNS:%s" % d for d in domains])))
cert.sign(pubkey, "sha256")
assert cert.verify(pubkey)
assert cert.verify()
# print check_purpose(,0
return cert.as_pem()
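# Illustrative usage sketch (not part of the original module): generate a
# fresh key, a CSR covering two names, and a short-lived self-signed cert.
# The directories, key size and domain names are arbitrary example values.
if __name__ == '__main__':
    demo_key = init_save_key(2048, "/tmp/le-demo/keys")
    init_save_csr(demo_key, ["example.org", "www.example.org"], "/tmp/le-demo/certs")
    cert_pem = make_ss_cert(demo_key.pem, ["example.org", "www.example.org"])
    print("self-signed cert is %d bytes of PEM" % len(cert_pem))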
| diracdeltas/lets-encrypt-preview | letsencrypt/client/crypto_util.py | Python | apache-2.0 | 6,343 |
# -*- coding: utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.Utils import formatdate
from logbook import Logger
from thekraken.settings import MAIL
log = Logger('thekraken.report')
def send_report(subject, body):
"""Informs about an error"""
msg = MIMEText(body, 'html', _charset='utf-8')
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg['From'] = ', '.join(MAIL['sender'])
msg['To'] = ', '.join(MAIL['recipients'])
try:
s = smtplib.SMTP(MAIL['smtp_server'])
except:
log.exception()
else:
try:
s.login(MAIL['user'], MAIL['password'])
s.sendmail(MAIL['sender'], MAIL['recipients'], msg.as_string())
except:
log.exception()
finally:
s.close()
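# Illustrative usage sketch (not part of the original module); assumes the
# MAIL settings dict points at a reachable SMTP server.
if __name__ == '__main__':
    send_report(u'thekraken: crawl failed',
                u'<p>The nightly crawl raised an exception; see the logs.</p>')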
| sharkerz/thekraken | thekraken/report.py | Python | gpl-2.0 | 820 |
from tapiriik.settings import WEB_ROOT
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.fit import FITIO
from tapiriik.services.pwx import PWXIO
from tapiriik.services.tcx import TCXIO
from tapiriik.services.gpx import GPXIO
from lxml import etree
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta
import dateutil.parser
import requests
import time
import json
import os
import logging
logger = logging.getLogger(__name__)
class VeloHeroService(ServiceBase):
ID = "velohero"
DisplayName = "Velo Hero"
DisplayAbbreviation = "VH"
_urlRoot = "http://app.velohero.com"
AuthenticationType = ServiceAuthenticationType.UsernamePassword
RequiresExtendedAuthorizationDetails = True
ReceivesStationaryActivities = False
SupportsHR = SupportsCadence = SupportsTemp = SupportsPower = True
# http://app.velohero.com/sports/list?view=json
# For mapping common -> Velo Hero
_activityMappings = {
ActivityType.Cycling: "1",
ActivityType.Running: "2",
ActivityType.Swimming: "3",
ActivityType.Gym: "4",
ActivityType.Elliptical: "4",
ActivityType.Skating: "4",
ActivityType.Rowing: "11",
ActivityType.MountainBiking: "6",
ActivityType.Hiking: "7",
ActivityType.Walking: "7",
ActivityType.Snowboarding: "8",
ActivityType.CrossCountrySkiing: "8",
ActivityType.DownhillSkiing: "8",
ActivityType.Climbing: "7",
ActivityType.Wheelchair: "0",
ActivityType.Other: "0"
}
# For mapping Velo Hero -> common
_reverseActivityMappings={
"0": ActivityType.Other, # No Sport
"1": ActivityType.Cycling,
"2": ActivityType.Running,
"3": ActivityType.Swimming,
"4": ActivityType.Gym,
"5": ActivityType.Other, # Strength
"6": ActivityType.MountainBiking,
"7": ActivityType.Hiking,
"8": ActivityType.CrossCountrySkiing,
"9": ActivityType.Cycling, # Velomobil (HPV)
"10": ActivityType.Other, # Ball Games
"11": ActivityType.Rowing, # Water Sports
"12": ActivityType.Cycling # Pedelec
}
SupportedActivities = list(_activityMappings.keys())
def _add_auth_params(self, params=None, record=None):
"""
Adds username and password to the passed-in params,
returns modified params dict.
"""
from tapiriik.auth.credential_storage import CredentialStore
if params is None:
params = {}
if record:
email = CredentialStore.Decrypt(record.ExtendedAuthorization["Email"])
password = CredentialStore.Decrypt(record.ExtendedAuthorization["Password"])
params['user'] = email
params['pass'] = password
return params
def WebInit(self):
self.UserAuthorizationURL = WEB_ROOT + reverse("auth_simple", kwargs={"service": self.ID})
def Authorize(self, email, password):
"""
POST Username and Password
URL: http://app.velohero.com/sso
Parameters:
user = username
pass = password
view = json
The login was successful if you get HTTP status code 200.
For other HTTP status codes, the login was not successful.
"""
from tapiriik.auth.credential_storage import CredentialStore
res = requests.post(self._urlRoot + "/sso",
params={'user': email, 'pass': password, 'view': 'json'})
if res.status_code != 200:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
res.raise_for_status()
res = res.json()
if res["session"] is None:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
member_id = res["user-id"]
if not member_id:
raise APIException("Unable to retrieve user id", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
return (member_id, {}, {"Email": CredentialStore.Encrypt(email), "Password": CredentialStore.Encrypt(password)})
def RevokeAuthorization(self, serviceRecord):
pass # No auth tokens to revoke...
def DeleteCachedData(self, serviceRecord):
pass # No cached data...
def _parseDateTime(self, date):
return datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
def _durationToSeconds(self, dur):
parts = dur.split(":")
return int(parts[0])*3600 + int(parts[1])*60 + int(parts[2])
def DownloadActivityList(self, serviceRecord, exhaustive=False):
"""
GET List of Activities as JSON File
URL: http://app.velohero.com/export/workouts/json
Parameters:
user = username
pass = password
date_from = YYYY-MM-DD
date_to = YYYY-MM-DD
"""
activities = []
exclusions = []
discoveredWorkoutIds = []
params = self._add_auth_params({}, record=serviceRecord)
limitDateFormat = "%Y-%m-%d"
if exhaustive:
listEnd = datetime.now() + timedelta(days=1.5) # Who knows which TZ it's in
listStart = datetime(day=1, month=1, year=1980) # The beginning of time
else:
listEnd = datetime.now() + timedelta(days=1.5) # Who knows which TZ it's in
listStart = listEnd - timedelta(days=20) # Doesn't really matter
params.update({"date_from": listStart.strftime(limitDateFormat), "date_to": listEnd.strftime(limitDateFormat)})
logger.debug("Requesting %s to %s" % (listStart, listEnd))
res = requests.get(self._urlRoot + "/export/workouts/json", params=params)
if res.status_code != 200:
if res.status_code == 403:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to retrieve activity list")
res.raise_for_status()
try:
res = res.json()
except ValueError:
raise APIException("Could not decode activity list")
if "workouts" not in res:
raise APIException("No activities")
for workout in res["workouts"]:
workoutId = int(workout["id"])
if workoutId in discoveredWorkoutIds:
continue # There's the possibility of query overlap
discoveredWorkoutIds.append(workoutId)
if workout["file"] is not "1":
logger.debug("Skip workout with ID: " + str(workoutId) + " (no file)")
continue # Skip activity without samples (no PWX export)
activity = UploadedActivity()
logger.debug("Workout ID: " + str(workoutId))
# Duration (dur_time)
duration = self._durationToSeconds(workout["dur_time"])
activity.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=duration)
# Start time (date_ymd, start_time)
startTimeStr = workout["date_ymd"] + " " + workout["start_time"]
activity.StartTime = self._parseDateTime(startTimeStr)
# End time (date_ymd, start_time) + dur_time
activity.EndTime = self._parseDateTime(startTimeStr) + timedelta(seconds=duration)
# Sport (sport_id)
if workout["sport_id"] in self._reverseActivityMappings:
activity.Type = self._reverseActivityMappings[workout["sport_id"]]
else:
activity.Type = ActivityType.Other
# Distance (dist_km)
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=float(workout["dist_km"]))
# Workout is hidden
activity.Private = workout["hide"] == "1"
activity.ServiceData = {"workoutId": workoutId}
activity.CalculateUID()
activities.append(activity)
return activities, exclusions
def DownloadActivity(self, serviceRecord, activity):
"""
GET Activity as a PWX File
URL: http://app.velohero.com/export/activity/pwx/<WORKOUT-ID>
Parameters:
user = username
pass = password
PWX export with laps.
"""
workoutId = activity.ServiceData["workoutId"]
logger.debug("Download PWX export with ID: " + str(workoutId))
params = self._add_auth_params({}, record=serviceRecord)
res = requests.get(self._urlRoot + "/export/activity/pwx/{}".format(workoutId), params=params)
if res.status_code != 200:
if res.status_code == 403:
raise APIException("No authorization to download activity with workout ID: {}".format(workoutId), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to download activity with workout ID: {}".format(workoutId))
activity = PWXIO.Parse(res.content, activity)
return activity
def UploadActivity(self, serviceRecord, activity):
"""
POST a Multipart-Encoded File
URL: http://app.velohero.com/upload/file
Parameters:
user = username
pass = password
view = json
file = multipart-encodes file (fit, tcx, pwx, gpx, srm, hrm...)
Maximum file size per file is 16 MB.
"""
has_location = has_distance = has_speed = False
for lap in activity.Laps:
for wp in lap.Waypoints:
if wp.Location and wp.Location.Latitude and wp.Location.Longitude:
has_location = True
if wp.Distance:
has_distance = True
if wp.Speed:
has_speed = True
if has_location and has_distance and has_speed:
format = "fit"
data = FITIO.Dump(activity)
elif has_location and has_distance:
format = "tcx"
data = TCXIO.Dump(activity)
elif has_location:
format = "gpx"
data = GPXIO.Dump(activity)
else:
format = "fit"
data = FITIO.Dump(activity)
# Upload
files = {"file": ("tap-sync-" + str(os.getpid()) + "-" + activity.UID + "." + format, data)}
params = self._add_auth_params({"view":"json"}, record=serviceRecord)
res = requests.post(self._urlRoot + "/upload/file", files=files, params=params)
if res.status_code != 200:
if res.status_code == 403:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to upload activity")
res.raise_for_status()
try:
res = res.json()
except ValueError:
raise APIException("Could not decode activity list")
if "error" in res:
raise APIException(res["error"])
# Set date, start time, comment and sport
if "id" in res:
workoutId = res["id"]
params = self._add_auth_params({
"workout_date" : activity.StartTime.strftime("%Y-%m-%d"),
"workout_start_time" : activity.StartTime.strftime("%H:%M:%S"),
"workout_comment" : activity.Notes,
"sport_id" : self._activityMappings[activity.Type],
"workout_hide": "yes" if activity.Private else "no"
}, record=serviceRecord)
res = requests.get(self._urlRoot + "/workouts/change/{}".format(workoutId), params=params)
if res.status_code != 200:
if res.status_code == 403:
raise APIException("No authorization to change activity with workout ID: {}".format(workoutId), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to change activity with workout ID: {}".format(workoutId))
return workoutId
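# Illustrative sketch (not part of the original service module).  Authorize()
# performs a live POST to app.velohero.com/sso; the e-mail and password
# below are placeholders.
#
#   svc = VeloHeroService()
#   member_id, auth, extended = svc.Authorize("[email protected]", "secret")
#   # "extended" holds the encrypted credentials that tapiriik stores for
#   # later DownloadActivityList() / UploadActivity() calls.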
| dlenski/tapiriik | tapiriik/services/VeloHero/velohero.py | Python | apache-2.0 | 12,643 |
from fabric.api import local
def tests():
commands = [
"export PYTHONPATH=.",
"export ASYNC_TEST_TIMEOUT=60",
"coverage run --source=. -m unittest discover -s tests/",
"coverage report -m",
]
local(' ; '.join(commands))
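# Illustrative invocation (not part of the original fabfile): with Fabric 1.x
# installed, run the test task from the repository root with "fab tests".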
| joelmir/tornado-simple-api | fabfile.py | Python | mit | 265 |
import tensorflow as tf
from tensorflow.contrib import slim as slim
from avb.ops import *
import math
def encoder(x, config, eps=None, is_training=True):
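    """Encoder network: returns a latent sample z = z0 + sum_i a_i(x) * v_i(eps_i)
    together with its mean and variance over the injected noise eps."""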
output_size = config['output_size']
c_dim = config['c_dim']
df_dim = config['df_dim']
z_dist = config['z_dist']
z_dim = config['z_dim']
eps_dim = config['eps_dim']
eps_nbasis = config['eps_nbasis']
    # Rescale x from [0, 1] to [-1, 1]
x = 2*x - 1
# Noise
if eps is None:
batch_size = tf.shape(x)[0]
eps = tf.random_normal(tf.stack([eps_nbasis, batch_size, eps_dim]))
net = flatten_spatial(x)
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_0")
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1")
z0 = slim.fully_connected(net, z_dim, activation_fn=None, scope='z0',
weights_initializer=tf.truncated_normal_initializer(stddev=1e-5))
a_vec = []
for i in range(eps_nbasis):
a = slim.fully_connected(net, z_dim, activation_fn=None, scope='a_%d' % i)
a = tf.nn.elu(a - 5.) + 1.
a_vec.append(a)
# Noise basis
v_vec = []
for i in range(eps_nbasis):
with tf.variable_scope("eps_%d" % i):
fc_argscope = slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.elu)
with fc_argscope:
net = slim.fully_connected(eps[i], 128, scope='fc_0')
net = slim.fully_connected(net, 128, scope='fc_1')
net = slim.fully_connected(net, 128, scope='fc_2')
v = slim.fully_connected(net, z_dim, activation_fn=None, scope='v')
v_vec.append(v)
# Sample and Moments
z = z0
Ez = z0
Varz = 0.
for a, v in zip(a_vec, v_vec):
z += a*v
Ev, Varv = tf.nn.moments(v, [0])
Ez += a*Ev
Varz += a*a*Varv
# if z_dist == "uniform":
# z = tf.nn.sigmoid(z)
return z, Ez, Varz
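# Illustrative sketch (not part of the original module): calling the encoder
# on a batch of 28x28 single-channel images.  The config values and the
# variable-scope name are arbitrary example assumptions.
if __name__ == '__main__':
    example_config = {
        'output_size': 28, 'c_dim': 1, 'df_dim': 64, 'z_dist': 'gauss',
        'z_dim': 32, 'eps_dim': 32, 'eps_nbasis': 4,
    }
    x = tf.placeholder(tf.float32, [None, 28, 28, 1])
    with tf.variable_scope('encoder'):
        z, Ez, Varz = encoder(x, example_config)
    print(z.get_shape(), Ez.get_shape(), Varz.get_shape())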
| LMescheder/AdversarialVariationalBayes | avb/avb/models/full0_ac.py | Python | mit | 1,962 |
#-*- coding: utf-8 -*-
"""OAuth 2.0 Authorization"""
try: import simplejson as json
except ImportError: import json
from django.http import HttpResponseRedirect
try:
from django.http.request import absolute_http_url_re # Django 1.5+
except ImportError:
from django.http import absolute_http_url_re
from urllib import urlencode
from .consts import ACCESS_TOKEN_EXPIRATION, REFRESHABLE
from .consts import CODE, TOKEN, CODE_AND_TOKEN
from .consts import AUTHENTICATION_METHOD, MAC, BEARER, MAC_KEY_LENGTH
from .exceptions import OAuth2Exception
from .lib.uri import add_parameters, add_fragments, normalize
from .models import Client, AccessRange, Code, AccessToken, KeyGenerator
class AuthorizationException(OAuth2Exception):
"""Authorization exception base class."""
pass
class MissingRedirectURI(OAuth2Exception):
"""Neither the request nor the client specify a redirect_url."""
pass
class UnauthenticatedUser(OAuth2Exception):
"""The provided user is not internally authenticated, via
user.is_authenticated()"""
pass
class UnvalidatedRequest(OAuth2Exception):
"""The method requested requires a validated request to continue."""
pass
class InvalidRequest(AuthorizationException):
"""The request is missing a required parameter, includes an
unsupported parameter or parameter value, or is otherwise
malformed."""
error = 'invalid_request'
class InvalidClient(AuthorizationException):
"""Client authentication failed (e.g. unknown client, no
client credentials included, multiple client credentials
included, or unsupported credentials type)."""
error = 'invalid_client'
class UnauthorizedClient(AuthorizationException):
"""The client is not authorized to request an authorization
code using this method."""
error = 'unauthorized_client'
class AccessDenied(AuthorizationException):
"""The resource owner or authorization server denied the
request."""
error = 'access_denied'
class UnsupportedResponseType(AuthorizationException):
"""The authorization server does not support obtaining an
authorization code using this method."""
error = 'unsupported_response_type'
class InvalidScope(AuthorizationException):
"""The requested scope is invalid, unknown, or malformed."""
error = 'invalid_scope'
RESPONSE_TYPES = {
"code":CODE,
"token":TOKEN}
class Authorizer(object):
"""Access authorizer. Validates access credentials and generates
a response with an authorization code passed as a parameter to the
redirect URI, an access token passed as a URI fragment to the redirect
URI, or both.
**Kwargs:**
* *scope:* An iterable of oauth2app.models.AccessRange objects representing
the scope the authorizer can grant. *Default None*
* *authentication_method:* Type of token to generate. Possible
values are: oauth2app.consts.MAC and oauth2app.consts.BEARER
*Default oauth2app.consts.BEARER*
* *refreshable:* Boolean value indicating whether issued tokens are
refreshable. *Default True*
"""
client = None
access_ranges = None
valid = False
error = None
def __init__(
self,
scope=None,
authentication_method=AUTHENTICATION_METHOD,
refreshable=REFRESHABLE,
response_type=CODE):
if response_type not in [CODE, TOKEN, CODE_AND_TOKEN]:
raise OAuth2Exception("Possible values for response_type"
" are oauth2app.consts.CODE, oauth2app.consts.TOKEN, "
"oauth2app.consts.CODE_AND_TOKEN")
self.authorized_response_type = response_type
self.refreshable = refreshable
if authentication_method not in [BEARER, MAC]:
raise OAuth2Exception("Possible values for authentication_method"
" are oauth2app.consts.MAC and oauth2app.consts.BEARER")
self.authentication_method = authentication_method
if scope is None:
self.authorized_scope = None
elif isinstance(scope, AccessRange):
self.authorized_scope = set([scope.key])
else:
self.authorized_scope = set([x.key for x in scope])
def __call__(self, request):
"""Validate the request. Returns an error redirect if the
request fails authorization, or a MissingRedirectURI if no
redirect_uri is available.
**Args:**
* *request:* Django HttpRequest object.
*Returns HTTP Response redirect*"""
try:
self.validate(request)
except AuthorizationException, e:
# The request is malformed or invalid. Automatically
# redirects to the provided redirect URL.
return self.error_redirect()
return self.grant_redirect()
def validate(self, request):
"""Validate the request. Raises an AuthorizationException if the
request fails authorization, or a MissingRedirectURI if no
redirect_uri is available.
**Args:**
* *request:* Django HttpRequest object.
*Returns None*"""
self.response_type = request.REQUEST.get('response_type')
self.client_id = request.REQUEST.get('client_id')
self.redirect_uri = request.REQUEST.get('redirect_uri')
self.scope = request.REQUEST.get('scope')
if self.scope is not None:
self.scope = set(self.scope.split(','))
self.state = request.REQUEST.get('state')
self.user = request.user
self.request = request
try:
self._validate()
except AuthorizationException, e:
self._check_redirect_uri()
self.error = e
raise e
self.valid = True
def _validate(self):
"""Validate the request."""
if self.client_id is None:
raise InvalidRequest('No client_id')
try:
self.client = Client.objects.get(key=self.client_id)
except Client.DoesNotExist:
raise InvalidClient("client_id %s doesn't exist" % self.client_id)
# Redirect URI
if self.redirect_uri is None:
if self.client.redirect_uri is None:
raise MissingRedirectURI("No redirect_uri"
"provided or registered.")
elif self.client.redirect_uri is not None:
if normalize(self.redirect_uri) != normalize(self.client.redirect_uri):
self.redirect_uri = self.client.redirect_uri
raise InvalidRequest("Registered redirect_uri doesn't "
"match provided redirect_uri.")
self.redirect_uri = self.redirect_uri or self.client.redirect_uri
# Check response type
if self.response_type is None:
raise InvalidRequest('response_type is a required parameter.')
if self.response_type not in ["code", "token"]:
raise InvalidRequest("No such response type %s" % self.response_type)
# Response type
if self.authorized_response_type & RESPONSE_TYPES[self.response_type] == 0:
raise UnauthorizedClient("Response type %s not allowed." %
self.response_type)
if not absolute_http_url_re.match(self.redirect_uri):
raise InvalidRequest('Absolute URI required for redirect_uri')
# Scope
if self.authorized_scope is not None and self.scope is None:
self.scope = self.authorized_scope
if self.scope is not None:
self.access_ranges = AccessRange.objects.filter(key__in=self.scope)
access_ranges = set(self.access_ranges.values_list('key', flat=True))
difference = access_ranges.symmetric_difference(self.scope)
if len(difference) != 0:
raise InvalidScope("Following access ranges do not "
"exist: %s" % ', '.join(difference))
if self.authorized_scope is not None:
new_scope = self.scope - self.authorized_scope
if len(new_scope) > 0:
raise InvalidScope("Invalid scope: %s" % ','.join(new_scope))
def _check_redirect_uri(self):
"""Raise MissingRedirectURI if no redirect_uri is available."""
if self.redirect_uri is None:
raise MissingRedirectURI('No redirect_uri to send response.')
if not absolute_http_url_re.match(self.redirect_uri):
raise MissingRedirectURI('Absolute redirect_uri required.')
def error_redirect(self):
"""In the event of an error, return a Django HttpResponseRedirect
with the appropriate error parameters.
Raises MissingRedirectURI if no redirect_uri is available.
*Returns HttpResponseRedirect*"""
self._check_redirect_uri()
if self.error is not None:
e = self.error
else:
e = AccessDenied("Access Denied.")
parameters = {'error': e.error, 'error_description': u'%s' % e.message}
if self.state is not None:
parameters['state'] = self.state
redirect_uri = self.redirect_uri
if self.authorized_response_type & CODE != 0:
redirect_uri = add_parameters(redirect_uri, parameters)
if self.authorized_response_type & TOKEN != 0:
redirect_uri = add_fragments(redirect_uri, parameters)
return HttpResponseRedirect(redirect_uri)
def _query_string(self):
"""Returns the a url encoded query string useful for resending request
parameters when a user authorizes the request via a form POST.
Raises UnvalidatedRequest if the request has not been validated.
*Returns str*"""
if not self.valid:
raise UnvalidatedRequest("This request is invalid or has not"
"been validated.")
parameters = {
"response_type":self.response_type,
"client_id":self.client_id}
if self.redirect_uri is not None:
parameters["redirect_uri"] = self.redirect_uri
if self.state is not None:
parameters["state"] = self.state
if self.scope is not None:
parameters["scope"] = ','.join(self.scope)
return urlencode(parameters)
query_string = property(_query_string)
def grant_redirect(self):
"""On successful authorization of the request, return a Django
HttpResponseRedirect with the appropriate authorization code parameters
        or access token URI fragments.
Raises UnvalidatedRequest if the request has not been validated.
*Returns HttpResponseRedirect*"""
if not self.valid:
raise UnvalidatedRequest("This request is invalid or has not "
"been validated.")
if self.user.is_authenticated():
parameters = {}
fragments = {}
if self.scope is not None:
access_ranges = list(AccessRange.objects.filter(key__in=self.scope))
else:
access_ranges = []
if RESPONSE_TYPES[self.response_type] & CODE != 0:
code = Code.objects.create(
user=self.user,
client=self.client,
redirect_uri=self.redirect_uri)
code.scope.add(*access_ranges)
code.save()
parameters['code'] = code.key
                if self.scope is not None:
                    parameters['scope'] = ','.join(self.scope)
if RESPONSE_TYPES[self.response_type] & TOKEN != 0:
access_token = AccessToken.objects.create(
user=self.user,
client=self.client)
access_token.scope = access_ranges
fragments['access_token'] = access_token.token
if access_token.refreshable:
fragments['refresh_token'] = access_token.refresh_token
fragments['expires_in'] = ACCESS_TOKEN_EXPIRATION
if self.scope is not None:
fragments['scope'] = ','.join(self.scope)
if self.authentication_method == MAC:
access_token.mac_key = KeyGenerator(MAC_KEY_LENGTH)()
fragments["mac_key"] = access_token.mac_key
fragments["mac_algorithm"] = "hmac-sha-256"
fragments["token_type"] = "mac"
elif self.authentication_method == BEARER:
fragments["token_type"] = "bearer"
access_token.save()
if self.state is not None:
parameters['state'] = self.state
redirect_uri = add_parameters(self.redirect_uri, parameters)
redirect_uri = add_fragments(redirect_uri, fragments)
return HttpResponseRedirect(redirect_uri)
else:
raise UnauthenticatedUser("Django user object associated with the "
"request is not authenticated.")
| RaduGatej/SensibleData-Platform | sensible_data_platform/oauth2app/authorize.py | Python | mit | 12,944 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2015 Vauxoo
# Author : Yanina Aular <[email protected]>
# Osval Reyes <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import claim_line
from . import crm_claim
| Endika/rma | crm_rma_claim_make_claim/models/__init__.py | Python | agpl-3.0 | 1,040 |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import exp_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
(question_models,) = models.Registry.import_models([models.NAMES.question])
memcache_services = models.Registry.import_memcache_services()
class QuestionServicesUnitTest(test_utils.GenericTestBase):
"""Test the question services module."""
def setUp(self):
"""Before each individual test, create dummy user."""
super(QuestionServicesUnitTest, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
def test_add_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_id = 'dummy'
title = 'A Question'
question_data_schema_version = 1
collection_id = 'col1'
language_code = 'en'
question = question_domain.Question(
question_id, title, question_data, question_data_schema_version,
collection_id, language_code)
question.validate()
question_model = question_services.add_question(self.owner_id, question)
model = question_models.QuestionModel.get(question_model.id)
self.assertEqual(model.title, title)
self.assertEqual(model.question_data, question_data)
self.assertEqual(model.question_data_schema_version,
question_data_schema_version)
self.assertEqual(model.collection_id, collection_id)
self.assertEqual(model.language_code, language_code)
def test_delete_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_id = 'dummy'
title = 'A Question'
question_data_schema_version = 1
collection_id = 'col1'
language_code = 'en'
question = question_domain.Question(
question_id, title, question_data, question_data_schema_version,
collection_id, language_code)
question.validate()
question_model = question_services.add_question(self.owner_id, question)
question_services.delete_question(self.owner_id, question_model.id)
with self.assertRaisesRegexp(Exception, (
'Entity for class QuestionModel with id %s not found' %(
question_model.id))):
question_models.QuestionModel.get(question_model.id)
def test_update_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_id = 'dummy'
title = 'A Question'
question_data_schema_version = 1
collection_id = 'col1'
language_code = 'en'
question = question_domain.Question(
question_id, title, question_data, question_data_schema_version,
collection_id, language_code)
question.validate()
question_model = question_services.add_question(self.owner_id, question)
change_dict = {'cmd': 'update_question_property',
'property_name': 'title',
'new_value': 'ABC',
'old_value': 'A Question'}
change_list = [question_domain.QuestionChange(change_dict)]
question_services.update_question(
self.owner_id, question_model.id, change_list, 'updated title')
model = question_models.QuestionModel.get(question_model.id)
self.assertEqual(model.title, 'ABC')
self.assertEqual(model.question_data, question_data)
self.assertEqual(model.question_data_schema_version,
question_data_schema_version)
self.assertEqual(model.collection_id, collection_id)
self.assertEqual(model.language_code, language_code)
| himanshu-dixit/oppia | core/domain/question_services_test.py | Python | apache-2.0 | 4,658 |
"""This class manages dataset downloads concurrently and processes progress
output."""
import wx
from retriever.lib.download import DownloadThread
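# Usage sketch (illustrative, not part of the original module): the manager is
# owned by a wx frame that exposes `engine` and `script_list`, as the rest of
# this GUI assumes; `main_frame` and `script` below are placeholder names.
#
#     manager = DownloadManager(main_frame)
#     if manager.Download(script):   # queues the dataset script if not already queued
#         ...                        # progress is then reported via the wx timer callback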
class DownloadManager:
def __init__(self, parent):
self.dialog = None
self.worker = None
self.queue = []
self.downloaded = set()
self.errors = set()
self.warnings = set()
self.Parent = parent
self.timer = wx.Timer(parent, -1)
self.timer.interval = 10
parent.Bind(wx.EVT_TIMER, self.update, self.timer)
def Download(self, script):
if not script in self.queue and not (self.worker and self.worker.script == script):
self.queue.append(script)
self.downloaded.add(script)
if script in self.errors:
self.errors.remove(script)
self.warnings.remove(script)
self.Parent.script_list.RefreshMe(None)
if not self.timer.IsRunning() and not self.worker and len(self.queue) < 2:
self.timer.Start(self.timer.interval)
return True
return False
def update(self, evt):
self.timer.Stop()
terminate = False
if self.worker:
script = self.worker.script
if self.worker.finished() and len(self.worker.output) == 0:
if hasattr(script, 'warnings') and script.warnings:
self.warnings.add(script)
self.Parent.SetStatusText('\n'.join(str(w) for w in script.warnings))
else:
self.Parent.SetStatusText("")
self.worker = None
self.Parent.script_list.RefreshMe(None)
self.timer.Start(self.timer.interval)
else:
self.worker.output_lock.acquire()
while len(self.worker.output) > 0 and not terminate:
if "Error:" in self.worker.output[0] and script in self.downloaded:
self.downloaded.remove(script)
self.errors.add(script)
if self.write(self.worker) == False:
terminate = True
self.worker.output = self.worker.output[1:]
#self.gauge.SetValue(100 * ((self.worker.scriptnum) /
# (self.worker.progress_max + 1.0)))
self.worker.output_lock.release()
if terminate:
self.Parent.Quit(None)
else:
self.timer.Start(self.timer.interval)
elif self.queue:
script = self.queue[0]
self.queue = self.queue[1:]
self.worker = DownloadThread(self.Parent.engine, script)
self.worker.parent = self
self.worker.start()
self.timer.Start(10)
def flush(self):
pass
def write(self, worker):
s = worker.output[0]
if '\b' in s:
s = s.replace('\b', '')
if not self.dialog:
wx.GetApp().Yield()
self.dialog = wx.ProgressDialog("Download Progress",
"Downloading datasets . . .\n"
+ " " * len(s),
maximum=1000,
parent=None,
style=wx.PD_SMOOTH
| wx.DIALOG_NO_PARENT
| wx.PD_CAN_ABORT
| wx.PD_AUTO_HIDE
| wx.PD_REMAINING_TIME
)
def progress(s):
if ' / ' in s:
s = s.split(' / ')
total = float(s[1])
current = float(s[0].split(': ')[1])
progress = int((current / total) * 1000)
return (progress if progress > 1 else 1)
else:
return None
current_progress = progress(s)
if current_progress:
(keepgoing, skip) = self.dialog.Update(current_progress, s)
else:
(keepgoing, skip) = self.dialog.Pulse(s)
if not keepgoing:
return False
else:
if self.dialog:
self.dialog.Update(1000, "")
self.dialog.Destroy()
self.dialog = None
if '...' in s:
self.Parent.SetStatusText(s)
else:
self.Parent.script_list.SetStatus(worker.script.name, s)
wx.GetApp().Yield()
return True
| embaldridge/retriever | app/download_manager.py | Python | mit | 4,858 |
#!/usr/bin/env python3
"""Split VCF into singleton vs other duplicated categories
Usage:
<program> input_vcf input_categories
Input and output VCFs can be compressed with gzip, ending in .gz
"""
# Modules
from collections import defaultdict
from collections import Counter
import gzip
import sys
# Functions
def myopen(_file, mode="rt"):
if _file.endswith(".gz"):
return gzip.open(_file, mode=mode)
else:
return open(_file, mode=mode)
# Parse user input
try:
input_vcf = sys.argv[1]
input_categories = sys.argv[2]
except:
print(__doc__)
sys.exit(1)
# Read categories
categories = set()
snps = dict()
loci = defaultdict(list)
with open(input_categories) as infile:
for line in infile:
if line.startswith("Scaffold"):
continue
scaffold, position, snp, category = line.strip().split()
categories.add(category)
locus = snp.split("_")[0]
snps[(scaffold, position, snp)] = category
loci[locus].append(category)
# Open output file handles
output_vcfs = dict()
for category in categories:
if input_vcf.endswith(".gz"):
compression = ".gz"
else:
compression = ""
output_vcfs[category] = myopen(input_vcf.replace(".vcf", "").replace(".gz", "") + "." + category + ".vcf" + compression, "wt")
# Read and split VCF
with myopen(input_vcf) as infile:
for line in infile:
if line.startswith("#"):
for output in output_vcfs:
output_vcfs[output].write(line)
continue
# Write SNPs in their respective output files
l = line.strip().split()
scaffold, position, snp = l[:3]
category = snps[(scaffold, position, snp)]
output_vcfs[category].write(line)
| enormandeau/stacks_workflow | 00-scripts/10_split_vcf_in_categories.py | Python | gpl-3.0 | 1,776 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_duckduckgo
# Purpose:      Queries DuckDuckGo's API for information about the target.
#
# Author: Steve Micallef <[email protected]>
#
# Created: 21/07/2015
# Copyright: (c) Steve Micallef 2015
# Licence: GPL
# -------------------------------------------------------------------------------
import json
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_duckduckgo(SpiderFootPlugin):
"""DuckDuckGo:Query DuckDuckGo's API for descriptive information about your target."""
# Default options
opts = {
"affiliatedomains": True
}
# Option descriptions
optdescs = {
"affiliatedomains": "For affiliates, look up the domain name, not the hostname. This will usually return more meaningful information about the affiliate."
}
results = list()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["DOMAIN_NAME", "INTERNET_NAME", "AFFILIATE_INTERNET_NAME"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["DESCRIPTION_CATEGORY", "DESCRIPTION_ABSTRACT",
"AFFILIATE_DESCRIPTION_CATEGORY",
"AFFILIATE_DESCRIPTION_ABSTRACT"]
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if self.opts['affiliatedomains'] and "AFFILIATE_" in eventName:
eventData = self.sf.hostDomain(eventData, self.opts['_internettlds'])
if eventData in self.results:
self.sf.debug("Already did a search for " + eventData + ", skipping.")
return None
else:
self.results.append(eventData)
url = "https://api.duckduckgo.com/?q=" + eventData + "&format=json&pretty=1"
res = self.sf.fetchUrl(url, timeout=self.opts['_fetchtimeout'],
useragent="SpiderFoot")
if res == None:
self.sf.error("Unable to fetch " + url, False)
return None
try:
ret = json.loads(res['content'])
except BaseException as e:
ret = None
if ret == None:
self.sf.error("Unable to process empty response from DuckDuckGo for: " + \
eventData, False)
return None
if ret['Heading'] == "":
self.sf.debug("No DuckDuckGo information for " + eventData)
return None
        # Submit the DuckDuckGo results for analysis
evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", res['content'],
self.__name__, event)
self.notifyListeners(evt)
if 'AbstractText' in ret:
name = "DESCRIPTION_ABSTRACT"
if "AFFILIATE" in eventName:
name = "AFFILIATE_" + name
evt = SpiderFootEvent(name, ret['AbstractText'],
self.__name__, event)
self.notifyListeners(evt)
if 'RelatedTopics' in ret:
name = "DESCRIPTION_CATEGORY"
if "AFFILIATE" in eventName:
name = "AFFILIATE_" + name
for item in ret['RelatedTopics']:
cat = item['Text']
if cat == None or cat == "":
self.sf.debug("No category text found from DuckDuckGo.")
continue
evt = SpiderFootEvent(name, cat, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_duckduckgo class
| Wingless-Archangel/spiderfoot | modules/sfp_duckduckgo.py | Python | gpl-2.0 | 4,048 |
import platform
if platform.python_implementation() == 'PyPy':
def compress(*a, **kw):
from lz4framed import compress as _compress
return _compress(*a, **kw)
def decompress(*a, **kw):
from lz4framed import decompress as _decompress
return _decompress(*a, **kw)
else:
from lz4framed import compress, decompress
__all__ = ['compress', 'decompress']
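# A minimal round-trip check (sketch, not part of the original module): it
# assumes the lz4framed package is installed, which both code paths above
# require anyway.
if __name__ == '__main__':
    payload = b'hello world' * 64
    assert decompress(compress(payload)) == payload
    print('lz4framed round-trip OK')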
| douban/dpark | dpark/utils/lz4wrapper.py | Python | bsd-3-clause | 396 |
import imboclient.test.integration.config as config
import imboclient.client as imbo
import os
import json
class TestClient:
def setup(self):
self._host = config.server['host'] + ":" + config.server['port']
self._public = config.server['public']
self._private = config.server['private']
self._client = imbo.Client([self._host], self._public, self._private)
self._res_path = os.path.dirname(__file__)
self._valid_image_path = self._res_path + '/res/imbologo.png'
self._last_imbo_id = None
def teardown(self):
# delete our test image after every test for consistency
if self._last_imbo_id:
self._delete_test_image(self._last_imbo_id)
self._client = None
def _add_test_image(self):
res = self._client.add_image(self._valid_image_path)
if 'imageIdentifier' in res:
self._last_imbo_id = res['imageIdentifier']
return res
def _delete_test_image(self, imbo_id):
return self._client.delete_image(imbo_id)
def test_add_new_image(self):
result = self._add_test_image()
assert len(result['imageIdentifier']) > 0
def test_add_new_image_from_string(self):
image_string = open(self._valid_image_path, 'rb').read()
result = self._client.add_image_from_string(image_string)
assert len(result['imageIdentifier']) > 0
self._client.delete_image(result['imageIdentifier'])
def test_add_new_invalid_image_from_string(self):
image_string = 'invalidimagedata'
try:
result = self._client.add_image_from_string(image_string)
assert False
except self._client.ImboInternalError:
pass
def test_add_new_image_from_url(self):
        image_url = 'https://raw.github.com/andreasrs/ImboclientPython/master/imboclient/test/integration/res/imbologo.png' # TODO remove dependency on github
result = self._client.add_image_from_url(image_url)
assert result['imageIdentifier']
self._client.delete_image(result['imageIdentifier'])
def test_image_exists(self):
imbo_id = self._add_test_image()['imageIdentifier']
self._delete_test_image(imbo_id)
result = self._client.image_exists(imbo_id)
assert not result
def test_head_image(self):
imbo_id = self._add_test_image()['imageIdentifier']
result = self._client.head_image(imbo_id)
assert result.status_code == 200
def test_edit_metadata(self):
imbo_id = self._add_test_image()['imageIdentifier']
metadata = {"Key1": "Value1"}
result = self._client.edit_metadata(imbo_id, metadata)
assert result == metadata
def test_replace_metadata(self):
imbo_id = self._add_test_image()['imageIdentifier']
metadata = {"Key1": "Value1"}
result = self._client.replace_metadata(imbo_id, metadata)
assert result == metadata
def test_delete_metadata(self):
imbo_id = self._add_test_image()['imageIdentifier']
result = self._client.delete_metadata(imbo_id)
def test_num_images(self):
result = self._client.num_images()
assert result >= 0
def test_images(self):
result = self._client.images()
assert 'images' in result
assert result['search']
assert 'count' in result['search']
assert 'hits' in result['search']
assert result['search']['limit'] > 0
assert result['search']['page'] == 1
def test_image_data(self):
imbo_id = self._add_test_image()['imageIdentifier']
result = self._client.image_data(imbo_id)
assert result.status_code == 200
assert result.text
def test_image_data_from_url(self):
        image_url = 'https://raw.github.com/andreasrs/ImboclientPython/master/imboclient/test/integration/res/imbologo.png' # TODO remove dependency on github
result = self._client.image_data_from_url(image_url)
assert result.status_code == 200
assert result.text
def test_image_properties(self):
imbo_id = self._add_test_image()['imageIdentifier']
result = self._client.image_properties(imbo_id)
assert result['x-imbo-originalwidth']
assert result['x-imbo-originalfilesize']
assert result['x-imbo-originalheight']
assert result['x-imbo-originalextension']
assert result['x-imbo-originalmimetype']
def test_server_status(self):
result = self._client.server_status()
assert result['date']
assert result['storage']
assert result['database']
def test_user_info(self):
result = self._client.user_info()
assert result['user']
assert result['lastModified']
assert result['numImages'] >= 0
| imbo/imboclient-python | imboclient/test/integration/test_client.py | Python | mit | 4,827 |
#!/usr/bin/env python
import sys
import django
from django.conf import settings
TEMPLATE_CONTEXT_PROCESSORS = [
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
settings.configure(
DEBUG = True,
USE_TZ = True,
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'pandocfield',
],
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
],
TEMPLATE_CONTEXT_PROCESSORS = TEMPLATE_CONTEXT_PROCESSORS,
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': TEMPLATE_CONTEXT_PROCESSORS,
},
},
],
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
)
django.setup()
try:
# Django <= 1.8
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
except ImportError:
# Django >= 1.8
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner(verbosity=1)
failures = test_runner.run_tests(['pandocfield'])
if failures:
sys.exit(failures)
| JaapJoris/django-pandocfield | runtests.py | Python | gpl-3.0 | 2,001 |
# -*- coding: utf-8 -*-
"""
@file: tasks.py
@author: lyn
@contact: [email protected]
@python: 3.5
@editor: Vim
@create: 3/29/17 2:20 AM
@description:
        Asynchronous tasks used by the anti-crawler logic, mainly refreshing temporary records in certain data tables.
"""
from __future__ import absolute_import, unicode_literals
from celery import task as celery_task
from .models import Ban, RecentIpActivity
from django.utils import timezone
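# Example wiring (sketch; not part of this module): these tasks are meant to
# run periodically, e.g. via celery beat in the Django settings:
#
#     CELERYBEAT_SCHEDULE = {
#         'refresh-ban': {'task': 'refresh_ban', 'schedule': 60.0},
#         'refresh-ip-activity': {'task': 'refresh_ip_activity', 'schedule': 60.0},
#     }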
@celery_task(name="refresh_ban")
def refresh_ban():
clear_bans = []
for ban in Ban.objects.all():
if ban.ban_to < timezone.now():
ban.delete()
print("clear {} from Ban".format(ban.ip))
clear_bans.append(ban.ip)
return clear_bans
@celery_task(name="refresh_ip_activity")
def refresh_ip_activity():
clear_act_ips = []
for ip_activity in RecentIpActivity.objects.all():
if ip_activity.destroy_time < timezone.now():
ip_activity.delete()
print("clear {} acts from activities".format(ip_activity.ip))
clear_act_ips.append(ip_activity.ip)
return clear_act_ips
| lyn716/deep_stack | django_server/RobotKiller/tasks.py | Python | apache-2.0 | 1,115 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test components of specific crowdsourcing tasks.
"""
import json
import os
import unittest
import pandas as pd
import parlai.utils.testing as testing_utils
try:
from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import (
TurnAnnotationsStaticResultsCompiler,
)
from parlai.crowdsourcing.utils.tests import check_stdout
class TestAnalysis(unittest.TestCase):
"""
Test the analysis code for the static turn annotations task.
"""
def test_compile_results(self):
"""
Test compiling results on a dummy set of data.
"""
with testing_utils.tempdir() as tmpdir:
# Define expected stdout
# Paths
analysis_samples_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'analysis_samples'
)
analysis_outputs_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'test_turn_annotations_static_analysis',
)
expected_stdout_path = os.path.join(
analysis_outputs_folder, 'test_stdout.txt'
)
temp_gold_annotations_path = os.path.join(
tmpdir, 'gold_annotations.json'
)
# Save a file of gold annotations
gold_annotations = {
"1_0_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": True,
},
"1_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
"2_0_5": {
"bucket_0": False,
"bucket_1": True,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": False,
},
"2_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
}
with open(temp_gold_annotations_path, 'w') as f:
json.dump(gold_annotations, f)
# Run compilation of results
parser = TurnAnnotationsStaticResultsCompiler.setup_args()
parser.set_defaults(
**{
'results_folders': analysis_samples_folder,
'output_folder': tmpdir,
'onboarding_in_flight_data_file': os.path.join(
analysis_samples_folder, 'onboarding_in_flight.jsonl'
),
'gold_annotations_file': temp_gold_annotations_path,
}
)
args = parser.parse_args([])
with testing_utils.capture_output() as output:
compiler = TurnAnnotationsStaticResultsCompiler(vars(args))
compiler.NUM_SUBTASKS = 3
compiler.NUM_ANNOTATIONS = 3
compiler.compile_and_save_results()
actual_stdout = output.getvalue()
# Check the output against what it should be
check_stdout(
actual_stdout=actual_stdout,
expected_stdout_path=expected_stdout_path,
)
# Check that the saved results file is what it should be
sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx']
expected_results_path = os.path.join(
analysis_outputs_folder, 'expected_results.csv'
)
expected_results = (
pd.read_csv(expected_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
# Drop the 'folder' column, which contains a system-dependent path
# string
actual_results_rel_path = [
obj for obj in os.listdir(tmpdir) if obj.startswith('results')
][0]
actual_results_path = os.path.join(tmpdir, actual_results_rel_path)
actual_results = (
pd.read_csv(actual_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
if not actual_results.equals(expected_results):
raise ValueError(
f'\n\n\tExpected results:\n{expected_results.to_csv()}'
f'\n\n\tActual results:\n{actual_results.to_csv()}'
)
except ImportError:
pass
if __name__ == "__main__":
unittest.main()
| facebookresearch/ParlAI | tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py | Python | mit | 5,770 |
"""todoApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home),
url(r'^addTodo/$', views.addTodo),
url(r'^deleteTodo/(?P<todoId>\w+)/$', views.deleteTodo),
url(r'^appendTodo/(?P<todoId>\w+)/$', views.appendTodo),
url(r'^getTodos/$', views.getTodos),
url(r'^resetSearch/$', views.resetSearch)
]
| rusia-rak/todoApp | core/urls.py | Python | mpl-2.0 | 990 |
from OpenGL.GL import *
from pyrr import Quaternion, Vector3, Matrix44
from PySide import QtCore
from PySide.QtGui import (
QWidget,
QHBoxLayout,
QVBoxLayout,
QToolBar,
QIcon,
QFileDialog,
QTableWidget,
QAbstractItemView,
QHeaderView,
QLabel,
QCheckBox,
QSlider,
QLineEdit,
QFont,
QFontMetrics,
QDoubleSpinBox,
)
import projection as proj
from glsl import Shader
class LayerListWithToolBar(QWidget):
def __init__(self):
super(LayerListWithToolBar, self).__init__()
layout = QVBoxLayout()
self.setLayout(layout)
self.list = LayerList()
self.toolbar = QToolBar()
add_action = self.toolbar.addAction(
QIcon.fromTheme('list-add'),
'add',
).triggered.connect(self._add)
remove_action = self.toolbar.addAction(
QIcon.fromTheme('list-remove'),
'remove',
).triggered.connect(self._remove)
self.layout().addWidget(self.toolbar)
self.layout().addWidget(self.list)
def _add(self):
dialog = QFileDialog(self)
dialog.setFileMode(QFileDialog.ExistingFiles)
dialog.setViewMode(QFileDialog.Detail)
fileNames = dialog.selectedFiles() if dialog.exec_() else []
for fileName in fileNames:
layer = Layer()
layer.load_file(fileName, None)
self.list.add_layer(layer)
def _remove(self):
rows = sorted({index.row() for index in self.list.selectedIndexes()})
for row in reversed(rows):
self.list.remove_layer(row)
def add_layer(self, layer):
self.list.add_layer(layer)
def __iter__(self):
return self.list.__iter__()
def multiplyOrientation(self, quat):
self.list.multiplyOrientation(quat)
class LayerList(QTableWidget):
def __init__(self):
super(LayerList, self).__init__(0, 8)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setHorizontalHeaderLabels([
'S',
'V', 'alpha', '',
'M', 'orientation (w, x, y, z)',
'size [byte]', # Question(mkovacs): Should this say 'octet' instead?
'file',
])
hheader = self.horizontalHeader()
hheader.setStretchLastSection(True)
hheader.setResizeMode(QHeaderView.ResizeToContents)
hheader.setResizeMode(2, QHeaderView.Interactive)
vheader = self.verticalHeader()
vheader.setResizeMode(QHeaderView.ResizeToContents)
self.layers = []
def dropEvent(self, event):
if event.source() != self:
            super(LayerList, self).dropEvent(event)
return
src_row = self.selectedIndexes()[0].row()
dst_row = self.rowAt(event.pos().y())
if dst_row == -1:
dst_row = self.rowCount()
self.move_layer(src_row, dst_row)
if src_row < dst_row:
dst_row -= 1
self.selectRow(dst_row)
event.accept()
def move_layer(self, src_row, dst_row):
self.insert_layer(dst_row, self.layers[src_row])
if dst_row < src_row:
src_row += 1
self.remove_layer(src_row)
def insert_layer(self, index, layer):
self.layers.insert(index, layer)
self.insertRow(index)
layer.setup_ui(self, index)
def remove_layer(self, index):
self.removeRow(index)
del self.layers[index]
def add_layer(self, layer):
self.insert_layer(0, layer)
def __iter__(self):
return reversed(self.layers)
def multiplyOrientation(self, quat):
for layer in self.layers:
if layer.move.isChecked():
layer.multiplyOrientation(quat)
class Layer(object):
def __init__(self):
super(Layer, self).__init__()
self.orientation = Quaternion()
self.picked = None
self.show = QCheckBox()
self.show.setChecked(True)
self.alpha_slider = QSlider(QtCore.Qt.Orientation.Horizontal)
self.alpha_slider.setRange(0, 1024)
self.alpha_slider.setValue(1024)
self.alpha_number = QDoubleSpinBox()
self.alpha_number.setDecimals(3)
self.alpha_number.setSingleStep(0.01)
self.alpha_number.setRange(0, 1)
self.alpha_number.setValue(1)
self.alpha_slider.valueChanged.connect(self._alphaSliderChanged)
self.alpha_number.valueChanged.connect(self._alphaNumberChanged)
self.move = QCheckBox()
self.move.setChecked(True)
self.quat = QLineEdit()
font = QFont('monospace')
font.setStyleHint(QFont.TypeWriter)
self.quat.setFont(font)
default_quat = '+0.000, +1.000, +0.000, +0.000'
margins = self.quat.textMargins()
self.quat.setFixedWidth(
# HACK -------------------------------------------v
QFontMetrics(self.quat.font()).width(default_quat + ' ') +
margins.left() + margins.right()
)
self.quat.setInputMask('#0.000, #0.000, #0.000, #0.000')
self.quat.setMaxLength(30)
self.quat.setText(default_quat)
self.quat.editingFinished.connect(self._orientationChanged)
self.nbytes = QLabel()
self.nbytes.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.nbytes.setText('0')
self.label = QLabel()
self.label.setText('<empty>')
def multiplyOrientation(self, quat):
self.setOrientation(quat * self.orientation)
def setOrientation(self, quat):
self.orientation = quat
self.quat.setText(
'%+1.3f, %+1.3f, %+1.3f, %+1.3f' % (
self.orientation.w,
self.orientation.x,
self.orientation.y,
self.orientation.z,
)
)
def _orientationChanged(self):
text = self.quat.text()
def alpha(self):
return self.alpha_number.value() if self.show.isChecked() else 0.0
def _alphaSliderChanged(self):
self.alpha_number.setValue(self.alpha_slider.value() / 1024.0)
def _alphaNumberChanged(self):
self.alpha_slider.setValue(1024 * self.alpha_number.value())
def setup_ui(self, table, row):
widgets = [
None,
CenterH(self.show),
self.alpha_slider,
self.alpha_number,
CenterH(self.move),
self.quat,
self.nbytes,
self.label,
]
for (column, widget) in enumerate(widgets):
if widget is not None:
table.setCellWidget(row, column, widget)
def load_file(self, file_name, in_format):
self.sphere = proj.load_sphere(file_name, projection=in_format)
in_format = self.sphere.__class__
print('Loaded input %s from %s.' % (in_format.__name__, file_name))
self.texture_id = glGenTextures(1)
self.sphere.to_gl(self.texture_id)
self.shader = Shader(
vert=VERTEX_SHADER,
frag=FRAGMENT_SHADER + self.sphere.get_glsl_sampler(),
)
self.label.setText(file_name)
self.nbytes.setText(read_bsize(self.sphere.array.nbytes))
def read_bsize(n):
suffixes = ['', ' Ki', ' Mi', ' Gi', ' Ti', ' Pi', ' Ei', ' Zi', ' Yi']
for suffix in suffixes:
if n < 1024:
break
n /= 1024.0
return '%s%s' % (format(n, '.2f').rstrip('0').rstrip('.'), suffix)
class CenterH(QWidget):
def __init__(self, widget):
super(CenterH, self).__init__()
layout = QHBoxLayout()
self.setLayout(layout)
layout.addStretch()
layout.addWidget(widget)
layout.addStretch()
VERTEX_SHADER = '''
#version 120
uniform mat4x4 viewTransform;
attribute vec3 vert;
varying vec3 texCoord;
void main()
{
gl_Position = viewTransform * vec4(vert, 1);
texCoord = vert.xyz;
}
'''
FRAGMENT_SHADER = '''
#version 120
uniform float alphaFactor;
uniform mat3x3 orientation;
uniform vec3 picked;
varying vec3 texCoord;
vec4 sample(vec3 v);
void main()
{
gl_FragColor = (length(picked - texCoord) < 0.005)
? vec4(1.0, 0.0, 0.0, 1.0)
: sample(orientation * texCoord);
gl_FragColor.a *= alphaFactor;
}
'''
| mkovacs/sphaira | sphaira/layer.py | Python | lgpl-3.0 | 8,379 |
# This file is part of MyPaint.
# Copyright (C) 2009 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import gtk
gdk = gtk.gdk
from lib import document
import tileddrawwidget, brushmanager, dialogs
from gettext import gettext as _
def startfile(path):
import os
import platform
    if platform.system() == 'Windows':
os.startfile(path)
else:
os.system("xdg-open " + path)
def stock_button(stock_id):
b = gtk.Button()
img = gtk.Image()
img.set_from_stock(stock_id, gtk.ICON_SIZE_MENU)
b.add(img)
return b
class BrushManipulationWidget(gtk.HBox):
""" """
def __init__(self, app, brushicon_editor):
gtk.HBox.__init__(self)
self.app = app
self.bm = app.brushmanager
self.brushicon_editor = brushicon_editor
self.init_widgets()
self.bm.selected_brush_observers.append(self.brush_selected_cb)
def init_widgets(self):
l = self.brush_name_label = gtk.Label()
l.set_text(_('(unnamed brush)'))
self.pack_start(l, expand=True)
right_vbox_buttons = [
(gtk.STOCK_SAVE, self.update_settings_cb, _('Save Settings')),
(gtk.STOCK_ADD, self.create_brush_cb, _('Add As New')),
(gtk.STOCK_PROPERTIES, self.edit_brush_cb, _('Edit Brush Icon')),
(gtk.STOCK_EDIT, self.rename_brush_cb, _('Rename...')),
(gtk.STOCK_DELETE, self.delete_brush_cb, _('Remove...')),
]
for stock_id, clicked_cb, tooltip in reversed(right_vbox_buttons):
b = stock_button(stock_id)
b.connect('clicked', clicked_cb)
b.set_tooltip_text(tooltip)
self.pack_end(b, expand=False)
def brush_selected_cb(self, managed_brush, brushinfo):
name = managed_brush.name
if name is None:
name = _('(unnamed brush)')
else:
name = name.replace('_', ' ') # XXX safename/unsafename utils?
self.brush_name_label.set_text(name)
def edit_brush_cb(self, window):
self.edit_brush_properties_cb()
def create_brush_cb(self, window):
"""Create and save a new brush based on the current working brush."""
b = brushmanager.ManagedBrush(self.bm)
b.brushinfo = self.app.brush.clone()
b.brushinfo.set_string_property("parent_brush_name", None) #avoid mis-hilight
b.preview = self.brushicon_editor.get_preview_pixbuf()
b.save()
if self.bm.active_groups:
group = self.bm.active_groups[0]
else:
group = brushmanager.DEFAULT_BRUSH_GROUP
brushes = self.bm.get_group_brushes(group, make_active=True)
brushes.insert(0, b)
b.persistent = True # Brush was saved
b.in_brushlist = True
for f in self.bm.brushes_observers: f(brushes)
self.bm.select_brush(b)
# Pretend that the active app.brush is a child of the new one, for the
# sake of the strokemap and strokes drawn immediately after.
self.app.brush.set_string_property("parent_brush_name", b.name)
def rename_brush_cb(self, window):
src_brush = self.bm.selected_brush
if not src_brush.name:
dialogs.error(self, _('No brush selected!'))
return
dst_name = dialogs.ask_for_name(self, _("Rename Brush"), src_brush.name.replace('_', ' '))
if not dst_name:
return
dst_name = dst_name.replace(' ', '_')
# ensure we don't overwrite an existing brush by accident
dst_deleted = None
for group, brushes in self.bm.groups.iteritems():
for b2 in brushes:
if b2.name == dst_name:
if group == brushmanager.DELETED_BRUSH_GROUP:
dst_deleted = b2
else:
dialogs.error(self, _('A brush with this name already exists!'))
return
print 'renaming brush', repr(src_brush.name), '-->', repr(dst_name)
if dst_deleted:
deleted_brushes = self.bm.get_group_brushes(brushmanager.DELETED_BRUSH_GROUP)
deleted_brushes.remove(dst_deleted)
for f in self.bm.brushes_observers: f(deleted_brushes)
# save src as dst
src_name = src_brush.name
src_brush.name = dst_name
src_brush.save()
src_brush.name = src_name
# load dst
dst_brush = brushmanager.ManagedBrush(self.bm, dst_name, persistent=True)
dst_brush.load()
dst_brush.in_brushlist = True
# replace src with dst (but keep src in the deleted list if it is a stock brush)
self.delete_brush_internal(src_brush, replacement=dst_brush)
self.bm.select_brush(dst_brush)
def update_settings_cb(self, window):
b = self.bm.selected_brush
if not b.name:
dialogs.error(self, _('No brush selected, please use "Add As New" instead.'))
return
b.brushinfo = self.app.brush.clone()
b.save()
def delete_brush_cb(self, window):
b = self.bm.selected_brush
if not b.name:
dialogs.error(self, _('No brush selected!'))
return
if not dialogs.confirm(self, _("Really delete brush from disk?")):
return
self.bm.select_brush(None)
self.delete_brush_internal(b)
def delete_brush_internal(self, b, replacement=None):
for brushes in self.bm.groups.itervalues():
if b in brushes:
idx = brushes.index(b)
if replacement:
brushes[idx] = replacement
else:
del brushes[idx]
for f in self.bm.brushes_observers: f(brushes)
assert b not in brushes, 'Brush exists multiple times in the same group!'
if not b.delete_from_disk():
# stock brush can't be deleted
deleted_brushes = self.bm.get_group_brushes(brushmanager.DELETED_BRUSH_GROUP)
deleted_brushes.insert(0, b)
for f in self.bm.brushes_observers: f(deleted_brushes)
class BrushIconEditorWidget(gtk.VBox):
def __init__(self, app):
gtk.VBox.__init__(self)
self.app = app
self.bm = app.brushmanager
self.set_border_width(8)
self.init_widgets()
self.bm.selected_brush_observers.append(self.brush_selected_cb)
self.set_brush_preview_edit_mode(False)
def init_widgets(self):
button_box = gtk.HBox()
doc = document.Document(self.app.brush)
self.tdw = tileddrawwidget.TiledDrawWidget(self.app, doc)
self.tdw.set_size_request(brushmanager.preview_w*2, brushmanager.preview_h*2)
self.tdw.scale = 2.0
tdw_box = gtk.HBox()
tdw_box.pack_start(self.tdw, expand=False, fill=False)
tdw_box.pack_start(gtk.Label(), expand=True)
self.pack_start(tdw_box, expand=False, fill=False, padding=3)
self.pack_start(button_box, expand=False, fill=False, padding=3)
self.brush_preview_edit_mode_button = b = gtk.CheckButton(_('Edit'))
b.connect('toggled', self.brush_preview_edit_mode_cb)
button_box.pack_start(b, expand=False, padding=3)
self.brush_preview_clear_button = b = gtk.Button(_('Clear'))
def clear_cb(window):
self.tdw.doc.clear_layer()
b.connect('clicked', clear_cb)
button_box.pack_start(b, expand=False, padding=3)
self.brush_preview_save_button = b = gtk.Button(_('Save'))
b.connect('clicked', self.update_preview_cb)
button_box.pack_start(b, expand=False, padding=3)
def brush_preview_edit_mode_cb(self, button):
self.set_brush_preview_edit_mode(button.get_active())
def set_brush_preview_edit_mode(self, edit_mode):
self.brush_preview_edit_mode = edit_mode
self.brush_preview_edit_mode_button.set_active(edit_mode)
self.brush_preview_save_button.set_sensitive(edit_mode)
self.brush_preview_clear_button.set_sensitive(edit_mode)
self.tdw.set_sensitive(edit_mode)
def set_preview_pixbuf(self, pixbuf):
if pixbuf is None:
self.tdw.doc.clear()
else:
self.tdw.doc.load_from_pixbuf(pixbuf)
def get_preview_pixbuf(self):
pixbuf = self.tdw.doc.render_as_pixbuf(0, 0, brushmanager.preview_w, brushmanager.preview_h)
return pixbuf
def update_preview_cb(self, window):
pixbuf = self.get_preview_pixbuf()
b = self.bm.selected_brush
if not b.name:
dialogs.error(self, _('No brush selected, please use "Add As New" instead.'))
return
b.preview = pixbuf
b.save()
for brushes in self.bm.groups.itervalues():
if b in brushes:
for f in self.bm.brushes_observers: f(brushes)
def brush_selected_cb(self, managed_brush, brushinfo):
# Update brush icon preview if it is not in edit mode
if not self.brush_preview_edit_mode:
self.set_preview_pixbuf(managed_brush.preview)
| benosteen/mypaint | gui/brushcreationwidget.py | Python | gpl-2.0 | 9,333 |