repo_name (string, len 5–92) | path (string, len 4–232) | copies (string, 19 classes) | size (string, len 4–7) | content (string, len 721–1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
googleapis/googleapis-gen | google/cloud/automl/v1beta1/automl-v1beta1-py/google/cloud/automl_v1beta1/types/model_evaluation.py | 1 | 6961 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1beta1.types import classification
from google.cloud.automl_v1beta1.types import detection
from google.cloud.automl_v1beta1.types import regression
from google.cloud.automl_v1beta1.types import text_extraction
from google.cloud.automl_v1beta1.types import text_sentiment
from google.cloud.automl_v1beta1.types import translation
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.automl.v1beta1',
manifest={
'ModelEvaluation',
},
)
class ModelEvaluation(proto.Message):
r"""Evaluation results of a model.
Attributes:
classification_evaluation_metrics (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics):
Model evaluation metrics for image, text,
video and tables classification.
A Tables problem is considered classification
when the target column has CATEGORY DataType.
regression_evaluation_metrics (google.cloud.automl_v1beta1.types.RegressionEvaluationMetrics):
Model evaluation metrics for Tables
regression. A Tables problem is considered
regression when the target column has FLOAT64
DataType.
translation_evaluation_metrics (google.cloud.automl_v1beta1.types.TranslationEvaluationMetrics):
Model evaluation metrics for translation.
image_object_detection_evaluation_metrics (google.cloud.automl_v1beta1.types.ImageObjectDetectionEvaluationMetrics):
Model evaluation metrics for image object
detection.
video_object_tracking_evaluation_metrics (google.cloud.automl_v1beta1.types.VideoObjectTrackingEvaluationMetrics):
Model evaluation metrics for video object
tracking.
text_sentiment_evaluation_metrics (google.cloud.automl_v1beta1.types.TextSentimentEvaluationMetrics):
Evaluation metrics for text sentiment models.
text_extraction_evaluation_metrics (google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics):
Evaluation metrics for text extraction
models.
name (str):
Output only. Resource name of the model evaluation. Format:
``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}``
annotation_spec_id (str):
Output only. The ID of the annotation spec that the model
evaluation applies to. The ID is empty for the overall
model evaluation. For Tables, annotation specs do not exist
in the dataset and this ID is never set, but for
CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
the
[display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name]
field is used.
display_name (str):
Output only. The value of
[display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name]
at the moment when the model was trained. Because this field
returns a value at model training time, for different models
trained from the same dataset, the values may differ, since
display names could have been changed between the two models'
trainings. For Tables CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
distinct values of the target column at the moment of the
model evaluation are populated here. The display_name is
empty for the overall model evaluation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this model
evaluation was created.
evaluated_example_count (int):
Output only. The number of examples used for model
evaluation, i.e. for which ground truth from time of model
creation is compared against the predicted annotations
created by the model. For overall ModelEvaluation (i.e. with
annotation_spec_id not set) this is the total number of all
examples used for evaluation. Otherwise, this is the count
of examples that according to the ground truth were
annotated by the
[annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id].
"""
classification_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=8,
oneof='metrics',
message=classification.ClassificationEvaluationMetrics,
)
regression_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=24,
oneof='metrics',
message=regression.RegressionEvaluationMetrics,
)
translation_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=9,
oneof='metrics',
message=translation.TranslationEvaluationMetrics,
)
image_object_detection_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=12,
oneof='metrics',
message=detection.ImageObjectDetectionEvaluationMetrics,
)
video_object_tracking_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=14,
oneof='metrics',
message=detection.VideoObjectTrackingEvaluationMetrics,
)
text_sentiment_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=11,
oneof='metrics',
message=text_sentiment.TextSentimentEvaluationMetrics,
)
text_extraction_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=13,
oneof='metrics',
message=text_extraction.TextExtractionEvaluationMetrics,
)
name = proto.Field(
proto.STRING,
number=1,
)
annotation_spec_id = proto.Field(
proto.STRING,
number=2,
)
display_name = proto.Field(
proto.STRING,
number=15,
)
create_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
evaluated_example_count = proto.Field(
proto.INT32,
number=6,
)
__all__ = tuple(sorted(__protobuf__.manifest))
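# --- Illustrative usage sketch (editor's addition, not part of the generated
# file). Field names come from the message definition above; the resource name
# and the example count are made-up values, and the proto-plus `pb()` accessor
# is assumed from the proto-plus library this file targets.
#
# evaluation = ModelEvaluation(
#     name='projects/p/locations/l/models/m/modelEvaluations/e',
#     evaluated_example_count=100,
# )
# # At most one member of the 'metrics' oneof is set on a real response;
# # the underlying protobuf can report which one:
# which = ModelEvaluation.pb(evaluation).WhichOneof('metrics')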
| apache-2.0 | -8,033,426,745,816,520,000 | 39.947059 | 124 | 0.679787 | false |
filipp/MacHammer | tests.py | 1 | 8518 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import logging
import subprocess
from unittest import main, skip, TestCase
from machammer import (functions, system_profiler,
network, hooks, users,
screensaver, defaults,
printers, process,)
class DefaultsTestCase(TestCase):
def test_domains(self):
domains = defaults.domains()
self.assertGreater(len(domains), 1)
def test_finder_get(self):
finder = process.App('com.apple.Finder')
self.assertEqual(finder.prefs.ShowPathbar, True)
def test_finder_set(self):
finder = process.App('com.apple.Finder')
finder.prefs.ShowPathbar = False
class UsersTestCase(TestCase):
def test_nextid(self):
self.assertGreater(users.nextid(), 1)
def test_create(self):
info = users.create_user('Test User', 'testpassword')
rn = info['dsAttrTypeStandard:RecordName'][0]
self.assertEquals(rn, 'test.user')
def test_make_admin(self):
users.make_admin('test.user')
def test_delete(self):
users.create_user('Test User', 'testpassword')
users.delete_user('test.user')
class PrintersTestCase(TestCase):
def test_delete_printers(self):
printers.delete_printers()
class ProcessTestCase(TestCase):
def setUp(self):
self.appname = 'Stickies'
def test_kill(self):
process.kill(self.appname)
self.assertFalse(process.is_running(self.appname))
def test_open(self):
process.open(self.appname)
self.assertTrue(process.is_running(self.appname))
def test_activate(self):
process.activate(self.appname)
time.sleep(2)
self.assertTrue(process.is_active(self.appname))
class SystemProfilerTestCase(TestCase):
def testSerialNumber(self):
sn = system_profiler.get('Hardware', 'serial_number')
self.assertTrue(len(sn) > 8)
def testInvalidType(self):
with self.assertRaises(Exception):
system_profiler.SystemProfile('Whatever')
def testKeys(self):
self.assertTrue(len(system_profiler.keys()) > 3)
def testTypes(self):
self.assertIn('Hardware', system_profiler.types())
def testOsVersion(self):
"""
Check that the OS version we get from SP is contained
in the output of sw_vers
"""
build = subprocess.check_output(['sw_vers', '-buildVersion']).strip()
software = system_profiler.SystemProfile('Software')
self.assertIn(build, software.os_version)
def testOsVersionShortcut(self):
build = subprocess.check_output(['sw_vers', '-buildVersion']).strip()
self.assertTrue(build in system_profiler.get('Software', 'os_version'))
class NetworkTestCase(TestCase):
def test_get_computer_name(self):
name = network.get_computer_name()
self.assertEquals(name, 'lalalala')
def test_fix_computer_name(self):
name = network.fix_computer_name('Computer (2)')
self.assertEqual(name, 'Computer')
def test_wifi_enable(self):
"""Turn wifi power on, check that it's on"""
network.set_wifi_power(True)
time.sleep(3)
self.assertTrue(network.get_wifi_power())
def test_wired(self):
self.assertTrue(network.is_wired())
@skip('blaa')
def test_wifi_disable(self):
network.set_wifi_power(False)
time.sleep(3)
self.assertTrue(not network.get_wifi_power())
def test_primary(self):
self.assertEqual(network.get_primary(), 'en4')
def test_primary_wired(self):
self.assertTrue(network.is_wired(True))
class VersionsTestCase(TestCase):
def setUp(self):
self.profile = system_profiler.SystemProfile('Applications')
def testFindStickes(self):
results = self.profile.find('_name', 'Stickies')
self.assertTrue(len(results) > 0)
def testStickiesVersion(self):
results = self.profile.find('_name', 'Stickies')
if functions.os_version() >= 10.13:
self.assertEqual(results[0]['version'], '10.1')
else:
self.assertEqual(results[0]['version'], '10.0')
def testFindApplications(self):
results = self.profile.find('path', '/Applications')
self.assertTrue(len(results) > 10)
def testSystemVersion(self):
self.assertLess(functions.os_version(), 10.14)
class AppsTestCase(TestCase):
def setUp(self):
self.key = 'NSNavLastRootDirectory'
self.app = process.App('com.apple.Terminal')
def test_get_prefs(self):
path = os.path.expanduser(self.app.prefs.get(self.key))
self.assertTrue(os.path.exists(path))
def test_set_prefs(self):
self.app.prefs.set(self.key, '/tmp')
def test_is_running(self):
self.assertTrue(self.app.is_running())
def test_launch(self):
self.assertTrue(self.app.launch())
def test_quit(self):
app = process.App('com.apple.Stickies', 'Stickies')
app.launch()
#app.quit()
def test_is_active(self):
self.assertTrue(self.app.is_active())
class InstallerTestCase(TestCase):
def setUp(self):
self.pkg = os.getenv('MH_PKG')
self.image = os.getenv('MH_IMAGE')
def test_mount_and_install(self):
functions.mount_and_install(self.image, self.pkg)
class MountTestCase(TestCase):
def setUp(self):
self.mp = None
self.url = os.getenv('MH_URL')
self.image = os.getenv('MH_IMAGE')
def test_local_dmg(self):
with functions.mount(self.image) as p:
self.assertIn('/var/folders', p)
def test_mount_url(self):
with functions.fetch(self.url, '-L', '-o', self.image) as image:
with functions.mount(image) as mp:
self.assertTrue(os.path.isdir(mp))
# output file should still be there when set manually
self.assertTrue(os.path.exists(self.image))
def test_mount_url_temp(self):
with functions.fetch(self.url, '-L') as image:
self.image = image
with functions.mount(image) as mp:
self.assertTrue(os.path.isdir(mp))
self.mp = mp
self.assertFalse(os.path.isdir(self.mp))
# output file shouldn't be there when not set
self.assertFalse(os.path.exists(self.image))
class FunctionsTestCase(TestCase):
def setUp(self):
self.url = os.getenv('MH_URL')
self.stickes = '/Applications/Stickies.app'
def test_notification(self):
functions.display_notification('blaaa "lalala"')
def test_add_login_item(self):
users.add_login_item(self.stickes)
def test_remove_login_item(self):
users.remove_login_item(path=self.stickes)
@skip('This works, trust me.')
def test_create_media(self):
functions.create_os_media('/Applications/Install macOS Sierra.app',
'/Volumes/Untitled')
@skip('This works, trust me.')
def test_sleep(self):
functions.sleep()
def test_curl(self):
p = functions.curl(os.getenv('MH_URL'))
self.assertTrue(os.path.exists(p))
class ScreenSaverTestCase(TestCase):
def test_set_invalid(self):
with self.assertRaises(Exception):
screensaver.set('Blalala')
def test_set_flurry(self):
self.assertEquals(screensaver.set('Flurry'), None)
def test_get(self):
self.assertEquals(screensaver.get(), 'Flurry')
class HooksTestCase(TestCase):
def gethook(self):
return defaults.get(hooks.PREF_DOMAIN, 'LoginHook')
def test_set_login_path(self):
hooks.login('/lalala')
self.assertEquals(self.gethook(), '/lalala')
def test_set_login_decorator(self):
from machammer.decorators import login
@login
def blaa():
import sys
import subprocess
subprocess.call(['/usr/bin/say', 'Hello ' + sys.argv[1]])
blaa()
self.assertEquals(self.gethook(), '/var/root/Library/mh_loginhook.py')
def test_launchagent(self):
pass
def test_unset_login(self):
hooks.login()
with self.assertRaises(Exception):
self.assertEquals(self.gethook(), '')
if __name__ == '__main__':
loglevel = logging.DEBUG if os.getenv('MH_DEBUG') else logging.WARNING
logging.basicConfig(level=loglevel)
main()
| bsd-2-clause | -6,189,115,011,633,411,000 | 28.071672 | 79 | 0.623268 | false |
MDAnalysis/mdanalysis | benchmarks/benchmarks/analysis/rdf.py | 1 | 1204 | import MDAnalysis
try:
from MDAnalysisTests.datafiles import TPR, XTC
except ImportError:
pass
try:
from MDAnalysis.analysis.rdf import InterRDF
except ImportError:
pass
class SimpleRdfBench(object):
"""Benchmarks for MDAnalysis.analysis.rdf
"""
params = ([20,75,200],
[[0,5], [0,15], [0,20]],
[1, 100, 1000, 10000])
param_names = ['nbins',
'range_val',
'natoms']
def setup(self, nbins, range_val, natoms):
self.sel_str = 'name OW'
self.u = MDAnalysis.Universe(TPR, XTC)
try:
self.sel = self.u.select_atoms(self.sel_str)[:natoms]
except AttributeError:
self.sel = self.u.selectAtoms(self.sel_str)[:natoms]
# do not include initialization of the
# InterRDF object in the benchmark itself
self.rdf = InterRDF(g1=self.sel,
g2=self.sel,
nbins=nbins,
range=range_val)
def time_interrdf(self, nbins, range_val, natoms):
"""Benchmark a full trajectory parse
by MDAnalysis.analysis.rdf.InterRDF
"""
self.rdf.run()
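# Editor's note (hedged): this class follows the airspeed velocity (asv)
# benchmark convention -- 'params'/'param_names' define the parameter grid,
# 'setup' runs once per combination, and methods prefixed with 'time_' are
# timed. With asv installed it would typically be run via `asv run` or
# `asv dev --bench SimpleRdfBench`; the exact invocation depends on the
# repository's asv.conf.json and is an assumption here.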
| gpl-2.0 | -4,898,817,287,443,108,000 | 24.083333 | 65 | 0.539037 | false |
xl1994/tfConvo | genImg.py | 1 | 3379 | # -*- coding: utf-8 -*-
import os
from captcha.image import ImageCaptcha
import numpy as np
from PIL import Image
import tensorflow as tf
import random
# import time
NUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
ALPHABET_LOWER = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ALPHABET_UPPER = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def gen_captcha_text(char_set=NUMBER+ALPHABET_LOWER+ALPHABET_UPPER, captcha_size=4):
'''
NOT WELL DEFINED YET
'''
CAPTCHA_TEXT = []
for i in range(captcha_size):
C = random.choice(char_set)
CAPTCHA_TEXT.append(C)
return CAPTCHA_TEXT
def gen_captcha_data(captcha_text):
'''
NOT WELL DEFINED YET
'''
img = ImageCaptcha()
captcha_text = ' '.join(captcha_text)
captcha_data = img.generate(captcha_text)
captcha_data = Image.open(captcha_data)
captcha_data = np.array(captcha_data)
return captcha_text, captcha_data
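# Hedged usage sketch (editor's addition): generate one labelled sample;
# 'data' is a numpy uint8 array of shape (height, width, 3), suitable as
# input to a convolutional network.
#
# text, data = gen_captcha_data(gen_captcha_text(captcha_size=4))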
# IMAGE DATA TO TFRECORDS
def img_to_tfrecords(output_filename, input_directory, classes, width=128, height=128):
'''
Serialize labelled images from input_directory/<class name>/ into a single TFRecord file.
'''
writer = tf.python_io.TFRecordWriter(output_filename)
for index, name in enumerate(classes):
class_path = input_directory + '/' + name
for img_name in os.listdir(class_path):
img_path = class_path + '/' + img_name
img = Image.open(img_path)
# img = img.resize((width, height))
img_raw = img.tobytes()
example = tf.train.Example(features=tf.train.Features(feature={\
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),\
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))}))
writer.write(example.SerializeToString())
writer.close()
return output_filename
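# Hedged read-back sketch (editor's addition, using the TensorFlow 1.x API to
# match the writer above; 'train.tfrecords' is a hypothetical file name):
#
# for serialized in tf.python_io.tf_record_iterator('train.tfrecords'):
#     example = tf.train.Example()
#     example.ParseFromString(serialized)
#     label = example.features.feature['label'].int64_list.value[0]
#     img_raw = example.features.feature['img_raw'].bytes_list.value[0]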
def imgGen(num, charset, dstr):
flag = True
class_set = set()
try:
for item in charset:
classes_path = os.getcwd() + '/' + dstr + '/' + item
if not item in class_set:
class_set.add(item)
else:
continue
if not os.path.exists(classes_path):
os.makedirs(dstr+ '/' + item)
for i in range(num):
FILE_NAME = classes_path + '/label_' + str(i) + '.jpg'
ImageCaptcha().write(item, FILE_NAME)
img = Image.open(FILE_NAME)
region = (0,0,img.size[0]/4,img.size[1])
img = img.crop(region)
img.save(FILE_NAME)
except Exception as e:
print str(e)
flag = False
return flag
def imgTrain(num, charset):
return imgGen(num, charset, 'train')
def imgValidation(num, charset):
return imgGen(num, charset, 'valid')
if __name__ == '__main__':
# number of sample each character
num_train = 400
num_valid = 80
charset = NUMBER + ALPHABET_LOWER + ALPHABET_UPPER
if imgTrain(num_train, charset):
print 'Train: each character',num_train,'images generated!'
if imgValidation(num_valid, charset):
print 'Validation: each character',num_valid,'images generated!'
| mit | -1,317,025,532,587,306,800 | 34.197917 | 94 | 0.553122 | false |
nugget/python-insteonplm | insteonplm/messages/setImConfiguration.py | 1 | 1698 | """INSTEON Set IM Configuration Message."""
from insteonplm.messages.message import Message
from insteonplm.constants import (
MESSAGE_SET_IM_CONFIGURATION_0X6B,
MESSAGE_SET_IM_CONFIGURATION_SIZE,
MESSAGE_SET_IM_CONFIGURATION_RECEIVED_SIZE,
MESSAGE_ACK,
MESSAGE_NAK,
)
class SetIMConfiguration(Message):
"""INSTEON Get Insteon Modem Info Message.
Message type 0x60
"""
_code = MESSAGE_SET_IM_CONFIGURATION_0X6B
_sendSize = MESSAGE_SET_IM_CONFIGURATION_SIZE
_receivedSize = MESSAGE_SET_IM_CONFIGURATION_RECEIVED_SIZE
_description = "INSTEON Set IM Configuration Message"
def __init__(self, flags=None, acknak=None):
"""Init the GetImInfo Class."""
self._imConfigurationFlags = flags
self._acknak = self._setacknak(acknak)
@classmethod
def from_raw_message(cls, rawmessage):
"""Create message from raw byte stream."""
return SetIMConfiguration(rawmessage[2], rawmessage[3])
@property
def imConfigurationFlags(self):
"""Return the IM configuration flags."""
return self._imConfigurationFlags
@property
def acknak(self):
"""Return the ACK/NAK byte."""
return self._acknak
@property
def isack(self):
"""Test if this is an ACK message."""
return self._acknak is not None and self._acknak == MESSAGE_ACK
@property
def isnak(self):
"""Test if this is a NAK message."""
return self._acknak is not None and self._acknak == MESSAGE_NAK
def _message_properties(self):
return [
{"imConfigurationFlags": self._imConfigurationFlags},
{"acknak": self.acknak},
]
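# Hedged usage sketch (editor's addition; the 0x08 flag byte and the raw
# bytes below are made-up example values, not documented modem traffic):
#
# msg = SetIMConfiguration(flags=0x08)
# raw = SetIMConfiguration.from_raw_message(b'\x02\x6b\x08\x06')
# if raw.isack:
#     print('modem acknowledged the configuration change')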
| mit | 5,503,579,786,581,197,000 | 28.275862 | 71 | 0.651943 | false |
ImageEngine/gaffer | python/GafferTest/MetadataTest.py | 1 | 43314 | ##########################################################################
#
# Copyright (c) 2013-2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import subprocess
import os
import imath
import six
import IECore
import Gaffer
import GafferTest
class MetadataTest( GafferTest.TestCase ) :
class DerivedAddNode( GafferTest.AddNode ) :
def __init__( self, name="DerivedAddNode" ) :
GafferTest.AddNode.__init__( self, name )
IECore.registerRunTimeTyped( DerivedAddNode )
def testPlugDescription( self ) :
add = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( add["op1"], "description" ), None )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "description", "The first operand" )
self.assertEqual( Gaffer.Metadata.value( add["op1"], "description" ), "The first operand" )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "description", lambda plug : plug.getName() + " description" )
self.assertEqual( Gaffer.Metadata.value( add["op1"], "description" ), "op1 description" )
derivedAdd = self.DerivedAddNode()
self.assertEqual( Gaffer.Metadata.value( derivedAdd["op1"], "description" ), "op1 description" )
Gaffer.Metadata.registerValue( self.DerivedAddNode, "op*", "description", "derived class description" )
self.assertEqual( Gaffer.Metadata.value( derivedAdd["op1"], "description" ), "derived class description" )
self.assertEqual( Gaffer.Metadata.value( derivedAdd["op2"], "description" ), "derived class description" )
self.assertEqual( Gaffer.Metadata.value( add["op1"], "description" ), "op1 description" )
self.assertEqual( Gaffer.Metadata.value( add["op2"], "description" ), None )
def testArbitraryValues( self ) :
add = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( add, "aKey" ), None )
self.assertEqual( Gaffer.Metadata.value( add["op1"], "aKey" ), None )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "aKey", "something" )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op*", "aKey", "somethingElse" )
self.assertEqual( Gaffer.Metadata.value( add, "aKey" ), "something" )
self.assertEqual( Gaffer.Metadata.value( add["op1"], "aKey" ), "somethingElse" )
def testInheritance( self ) :
Gaffer.Metadata.registerValue( GafferTest.AddNode, "iKey", "Base class value" )
derivedAdd = self.DerivedAddNode()
self.assertEqual( Gaffer.Metadata.value( derivedAdd, "iKey" ), "Base class value" )
Gaffer.Metadata.registerValue( self.DerivedAddNode, "iKey", "Derived class value" )
self.assertEqual( Gaffer.Metadata.value( derivedAdd, "iKey" ), "Derived class value" )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "iKey", "Base class plug value" )
self.assertEqual( Gaffer.Metadata.value( derivedAdd["op1"], "iKey" ), "Base class plug value" )
Gaffer.Metadata.registerValue( self.DerivedAddNode, "op1", "iKey", "Derived class plug value" )
self.assertEqual( Gaffer.Metadata.value( derivedAdd["op1"], "iKey" ), "Derived class plug value" )
def testNodeSignals( self ) :
ns = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
ps = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "k", "something" )
self.assertEqual( len( ps ), 0 )
self.assertEqual( len( ns ), 1 )
self.assertEqual( ns[0], ( GafferTest.AddNode.staticTypeId(), "k", None ) )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "k", "somethingElse" )
self.assertEqual( len( ps ), 0 )
self.assertEqual( len( ns ), 2 )
self.assertEqual( ns[1], ( GafferTest.AddNode.staticTypeId(), "k", None ) )
def testPlugSignals( self ) :
ns = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
ps = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "k", "something" )
self.assertEqual( len( ps ), 1 )
self.assertEqual( len( ns ), 0 )
self.assertEqual( ps[0], ( GafferTest.AddNode.staticTypeId(), "op1", "k", None ) )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "k", "somethingElse" )
self.assertEqual( len( ps ), 2 )
self.assertEqual( len( ns ), 0 )
self.assertEqual( ps[1], ( GafferTest.AddNode.staticTypeId(), "op1", "k", None ) )
def testSignalsDontExposeInternedStrings( self ) :
cs = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "k", "aaa" )
self.assertTrue( type( cs[0][1] ) is str )
cs = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "k", "bbb" )
self.assertTrue( type( cs[0][1] ) is str )
self.assertTrue( type( cs[0][2] ) is str )
def testInstanceMetadata( self ) :
Gaffer.Metadata.registerValue( GafferTest.AddNode, "imt", "globalNodeValue" )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "imt", "globalPlugValue" )
n = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( n, "imt" ), "globalNodeValue" )
self.assertEqual( Gaffer.Metadata.value( n["op1"], "imt" ), "globalPlugValue" )
Gaffer.Metadata.registerValue( n, "imt", "instanceNodeValue" )
Gaffer.Metadata.registerValue( n["op1"], "imt", "instancePlugValue" )
self.assertEqual( Gaffer.Metadata.value( n, "imt" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.value( n["op1"], "imt" ), "instancePlugValue" )
Gaffer.Metadata.registerValue( n, "imt", None )
Gaffer.Metadata.registerValue( n["op1"], "imt", None )
self.assertEqual( Gaffer.Metadata.value( n, "imt" ), None )
self.assertEqual( Gaffer.Metadata.value( n["op1"], "imt" ), None )
def testInstanceMetadataUndo( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), None )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), None )
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.registerValue( s["n"], "undoTest", "instanceNodeValue" )
Gaffer.Metadata.registerValue( s["n"]["op1"], "undoTest", "instancePlugValue" )
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), "instancePlugValue" )
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.registerValue( s["n"], "undoTest", "instanceNodeValue2" )
Gaffer.Metadata.registerValue( s["n"]["op1"], "undoTest", "instancePlugValue2" )
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), "instanceNodeValue2" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), "instancePlugValue2" )
s.undo()
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), "instancePlugValue" )
s.undo()
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), None )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), None )
s.redo()
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), "instancePlugValue" )
s.redo()
self.assertEqual( Gaffer.Metadata.value( s["n"], "undoTest" ), "instanceNodeValue2" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "undoTest" ), "instancePlugValue2" )
def testInstanceMetadataSignals( self ) :
n = GafferTest.AddNode()
ncs = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
pcs = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerValue( n, "signalTest", 1 )
Gaffer.Metadata.registerValue( n["op1"], "signalTest", 1 )
self.assertEqual( len( ncs ), 1 )
self.assertEqual( len( pcs ), 1 )
self.assertEqual( ncs[0], ( GafferTest.AddNode.staticTypeId(), "signalTest", n ) )
self.assertEqual( pcs[0], ( GafferTest.AddNode.staticTypeId(), "op1", "signalTest", n["op1"] ) )
Gaffer.Metadata.registerValue( n, "signalTest", 1 )
Gaffer.Metadata.registerValue( n["op1"], "signalTest", 1 )
self.assertEqual( len( ncs ), 1 )
self.assertEqual( len( pcs ), 1 )
Gaffer.Metadata.registerValue( n, "signalTest", 2 )
Gaffer.Metadata.registerValue( n["op1"], "signalTest", 2 )
self.assertEqual( len( ncs ), 2 )
self.assertEqual( len( pcs ), 2 )
self.assertEqual( ncs[1], ( GafferTest.AddNode.staticTypeId(), "signalTest", n ) )
self.assertEqual( pcs[1], ( GafferTest.AddNode.staticTypeId(), "op1", "signalTest", n["op1"] ) )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
Gaffer.Metadata.registerValue( s["n"], "serialisationTest", 1 )
Gaffer.Metadata.registerValue( s["n"]["op1"], "serialisationTest", 2 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s2["n"], "serialisationTest" ), 1 )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["op1"], "serialisationTest" ), 2 )
def testStringSerialisationWithNewlinesAndQuotes( self ) :
trickyStrings = [
"Paragraph 1\n\nParagraph 2",
"'Quote'",
"Apostrophe's",
'"Double quote"',
]
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
for s in trickyStrings :
p = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"].addChild( p )
Gaffer.Metadata.registerValue( p, "description", s )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
for p, s in zip( script2["n"]["user"].children(), trickyStrings ) :
self.assertEqual( Gaffer.Metadata.value( p, "description" ), s )
def testRegisteredValues( self ) :
n = GafferTest.AddNode()
self.assertTrue( "r" not in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rp" not in Gaffer.Metadata.registeredValues( n["op1"] ) )
self.assertTrue( "ri" not in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rpi" not in Gaffer.Metadata.registeredValues( n["op1"] ) )
Gaffer.Metadata.registerValue( n.staticTypeId(), "r", 10 )
Gaffer.Metadata.registerValue( n.staticTypeId(), "op1", "rp", 20 )
self.assertTrue( "r" in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rp" in Gaffer.Metadata.registeredValues( n["op1"] ) )
self.assertTrue( "ri" not in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rpi" not in Gaffer.Metadata.registeredValues( n["op1"] ) )
Gaffer.Metadata.registerValue( n, "ri", 10 )
Gaffer.Metadata.registerValue( n["op1"], "rpi", 20 )
self.assertTrue( "r" in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rp" in Gaffer.Metadata.registeredValues( n["op1"] ) )
self.assertTrue( "ri" in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rpi" in Gaffer.Metadata.registeredValues( n["op1"] ) )
self.assertTrue( "r" not in Gaffer.Metadata.registeredValues( n, instanceOnly=True ) )
self.assertTrue( "rp" not in Gaffer.Metadata.registeredValues( n["op1"], instanceOnly=True ) )
self.assertTrue( "ri" in Gaffer.Metadata.registeredValues( n ) )
self.assertTrue( "rpi" in Gaffer.Metadata.registeredValues( n["op1"] ) )
def testInstanceDestruction( self ) :
for i in range( 0, 1000 ) :
p = Gaffer.Plug()
n = Gaffer.Node()
self.assertEqual( Gaffer.Metadata.value( p, "destructionTest" ), None )
self.assertEqual( Gaffer.Metadata.value( n, "destructionTest" ), None )
Gaffer.Metadata.registerValue( p, "destructionTest", 10 )
Gaffer.Metadata.registerValue( n, "destructionTest", 20 )
self.assertEqual( Gaffer.Metadata.value( p, "destructionTest" ), 10 )
self.assertEqual( Gaffer.Metadata.value( n, "destructionTest" ), 20 )
del p
del n
def testOrder( self ) :
class MetadataTestNodeA( Gaffer.Node ) :
def __init__( self, name = "MetadataTestNodeOne" ) :
Gaffer.Node.__init__( self, name )
self["a"] = Gaffer.IntPlug()
IECore.registerRunTimeTyped( MetadataTestNodeA )
class MetadataTestNodeB( MetadataTestNodeA ) :
def __init__( self, name = "MetadataTestNodeOne" ) :
MetadataTestNodeA.__init__( self, name )
IECore.registerRunTimeTyped( MetadataTestNodeB )
# test node registrations
node = MetadataTestNodeB()
preExistingRegistrations = Gaffer.Metadata.registeredValues( node )
Gaffer.Metadata.registerValue( node, "nodeSeven", 7 )
Gaffer.Metadata.registerValue( node, "nodeEight", 8 )
Gaffer.Metadata.registerValue( node, "nodeNine", 9 )
Gaffer.Metadata.registerValue( MetadataTestNodeB, "nodeFour", 4 )
Gaffer.Metadata.registerValue( MetadataTestNodeB, "nodeFive", 5 )
Gaffer.Metadata.registerValue( MetadataTestNodeB, "nodeSix", 6 )
Gaffer.Metadata.registerValue( MetadataTestNodeA, "nodeOne", 1 )
Gaffer.Metadata.registerValue( MetadataTestNodeA, "nodeTwo", 2 )
Gaffer.Metadata.registerValue( MetadataTestNodeA, "nodeThree", 3 )
self.assertEqual(
Gaffer.Metadata.registeredValues( node ),
preExistingRegistrations + [
# base class values first, in order of their registration
"nodeOne",
"nodeTwo",
"nodeThree",
# derived class values next, in order of their registration
"nodeFour",
"nodeFive",
"nodeSix",
# instance values last, in order of their registration
"nodeSeven",
"nodeEight",
"nodeNine",
]
)
# test plug registrations
preExistingRegistrations = Gaffer.Metadata.registeredValues( node["a"] )
Gaffer.Metadata.registerValue( node["a"], "plugSeven", 7 )
Gaffer.Metadata.registerValue( node["a"], "plugEight", 8 )
Gaffer.Metadata.registerValue( node["a"], "plugNine", 9 )
Gaffer.Metadata.registerValue( MetadataTestNodeB, "a", "plugFour", 4 )
Gaffer.Metadata.registerValue( MetadataTestNodeB, "a", "plugFive", 5 )
Gaffer.Metadata.registerValue( MetadataTestNodeB, "a", "plugSix", 6 )
Gaffer.Metadata.registerValue( MetadataTestNodeA, "a", "plugOne", 1 )
Gaffer.Metadata.registerValue( MetadataTestNodeA, "a", "plugTwo", 2 )
Gaffer.Metadata.registerValue( MetadataTestNodeA, "a", "plugThree", 3 )
self.assertEqual(
Gaffer.Metadata.registeredValues( node["a"] ),
preExistingRegistrations + [
# base class values first, in order of their registration
"plugOne",
"plugTwo",
"plugThree",
# derived class values next, in order of their registration
"plugFour",
"plugFive",
"plugSix",
# instance values last, in order of their registration
"plugSeven",
"plugEight",
"plugNine",
]
)
def testThreading( self ) :
GafferTest.testMetadataThreading()
def testVectorTypes( self ) :
n = Gaffer.Node()
Gaffer.Metadata.registerValue( n, "stringVector", IECore.StringVectorData( [ "a", "b", "c" ] ) )
self.assertEqual( Gaffer.Metadata.value( n, "stringVector" ), IECore.StringVectorData( [ "a", "b", "c" ] ) )
Gaffer.Metadata.registerValue( n, "intVector", IECore.IntVectorData( [ 1, 2, 3 ] ) )
self.assertEqual( Gaffer.Metadata.value( n, "intVector" ), IECore.IntVectorData( [ 1, 2, 3 ] ) )
def testCopy( self ) :
n = Gaffer.Node()
s = IECore.StringVectorData( [ "a", "b", "c" ] )
Gaffer.Metadata.registerValue( n, "stringVector", s )
s2 = Gaffer.Metadata.value( n, "stringVector" )
self.assertEqual( s, s2 )
self.assertFalse( s.isSame( s2 ) )
s3 = Gaffer.Metadata.value( n, "stringVector", _copy = False )
self.assertEqual( s, s3 )
self.assertTrue( s.isSame( s3 ) )
def testBadSlotsDontAffectGoodSlots( self ) :
def badSlot( nodeTypeId, key, node ) :
raise Exception( "Oops" )
self.__goodSlotExecuted = False
def goodSlot( nodeTypeId, key, node ) :
self.__goodSlotExecuted = True
badConnection = Gaffer.Metadata.nodeValueChangedSignal().connect( badSlot )
goodConnection = Gaffer.Metadata.nodeValueChangedSignal().connect( goodSlot )
n = Gaffer.Node()
with IECore.CapturingMessageHandler() as mh :
Gaffer.Metadata.registerValue( n, "test", 10 )
self.assertTrue( self.__goodSlotExecuted )
self.assertEqual( len( mh.messages ), 1 )
self.assertTrue( "Oops" in mh.messages[0].message )
def testRegisterNode( self ) :
class MetadataTestNodeC( Gaffer.Node ) :
def __init__( self, name = "MetadataTestNodeC" ) :
Gaffer.Node.__init__( self, name )
self["a"] = Gaffer.IntPlug()
self["b"] = Gaffer.IntPlug()
IECore.registerRunTimeTyped( MetadataTestNodeC )
n = MetadataTestNodeC()
preExistingRegistrations = Gaffer.Metadata.registeredValues( n["a"] )
Gaffer.Metadata.registerNode(
MetadataTestNodeC,
"description",
"""
I am a multi
line description
""",
"nodeGadget:color", imath.Color3f( 1, 0, 0 ),
plugs = {
"a" : [
"description",
"""Another multi
line description""",
"preset:One", 1,
"preset:Two", 2,
"preset:Three", 3,
],
"b" : (
"description",
"""
I am the first paragraph.
I am the second paragraph.
""",
"otherValue", 100,
)
}
)
self.assertEqual( Gaffer.Metadata.value( n, "description" ), "I am a multi\nline description" )
self.assertEqual( Gaffer.Metadata.value( n, "nodeGadget:color" ), imath.Color3f( 1, 0, 0 ) )
self.assertEqual( Gaffer.Metadata.value( n["a"], "description" ), "Another multi\nline description" )
self.assertEqual( Gaffer.Metadata.value( n["a"], "preset:One" ), 1 )
self.assertEqual( Gaffer.Metadata.value( n["a"], "preset:Two" ), 2 )
self.assertEqual( Gaffer.Metadata.value( n["a"], "preset:Three" ), 3 )
self.assertEqual(
Gaffer.Metadata.registeredValues( n["a"] ),
preExistingRegistrations + [ "description", "preset:One", "preset:Two", "preset:Three" ]
)
self.assertEqual( Gaffer.Metadata.value( n["b"], "description" ), "I am the first paragraph.\n\nI am the second paragraph." )
self.assertEqual( Gaffer.Metadata.value( n["b"], "otherValue" ), 100 )
def testPersistenceOfInstanceValues( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
Gaffer.Metadata.registerValue( s["n"], "persistent1", 1 )
Gaffer.Metadata.registerValue( s["n"], "persistent2", 2, persistent = True )
Gaffer.Metadata.registerValue( s["n"], "nonpersistent", 3, persistent = False )
Gaffer.Metadata.registerValue( s["n"]["op1"], "persistent1", "one" )
Gaffer.Metadata.registerValue( s["n"]["op1"], "persistent2", "two", persistent = True )
Gaffer.Metadata.registerValue( s["n"]["op1"], "nonpersistent", "three", persistent = False )
self.assertEqual( Gaffer.Metadata.value( s["n"], "persistent1" ), 1 )
self.assertEqual( Gaffer.Metadata.value( s["n"], "persistent2" ), 2 )
self.assertEqual( Gaffer.Metadata.value( s["n"], "nonpersistent" ), 3 )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "persistent1" ), "one" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "persistent2" ), "two" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "nonpersistent" ), "three" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s2["n"], "persistent1" ), 1 )
self.assertEqual( Gaffer.Metadata.value( s2["n"], "persistent2" ), 2 )
self.assertEqual( Gaffer.Metadata.value( s2["n"], "nonpersistent" ), None )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["op1"], "persistent1" ), "one" )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["op1"], "persistent2" ), "two" )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["op1"], "nonpersistent" ), None )
def testUndoOfPersistentInstanceValues( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
def assertNonExistent() :
self.assertEqual( Gaffer.Metadata.value( s["n"], "a" ), None )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "b" ), None )
def assertPersistent() :
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True ), [ "a" ] )
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"]["op1"], instanceOnly = True ), [ "b" ] )
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True, persistentOnly = True ), [ "a" ] )
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"]["op1"], instanceOnly = True, persistentOnly = True ), [ "b" ] )
self.assertEqual( Gaffer.Metadata.value( s["n"], "a" ), 1 )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "b" ), 2 )
def assertNonPersistent() :
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True ), [ "a" ] )
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"]["op1"], instanceOnly = True ), [ "b" ] )
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True, persistentOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.registeredValues( s["n"]["op1"], instanceOnly = True, persistentOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.value( s["n"], "a" ), 1 )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "b" ), 2 )
assertNonExistent()
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.registerValue( s["n"], "a", 1, persistent = True )
Gaffer.Metadata.registerValue( s["n"]["op1"], "b", 2, persistent = True )
assertPersistent()
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.registerValue( s["n"], "a", 1, persistent = False )
Gaffer.Metadata.registerValue( s["n"]["op1"], "b", 2, persistent = False )
assertNonPersistent()
s.undo()
assertPersistent()
s.undo()
assertNonExistent()
s.redo()
assertPersistent()
s.redo()
assertNonPersistent()
def testChangeOfPersistenceSignals( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
ncs = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
pcs = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
self.assertEqual( len( ncs ), 0 )
self.assertEqual( len( pcs ), 0 )
Gaffer.Metadata.registerValue( s["n"], "a", 1, persistent = False )
Gaffer.Metadata.registerValue( s["n"]["op1"], "b", 2, persistent = False )
self.assertEqual( len( ncs ), 1 )
self.assertEqual( len( pcs ), 1 )
Gaffer.Metadata.registerValue( s["n"], "a", 1, persistent = True )
Gaffer.Metadata.registerValue( s["n"]["op1"], "b", 2, persistent = True )
self.assertEqual( len( ncs ), 2 )
self.assertEqual( len( pcs ), 2 )
Gaffer.Metadata.registerValue( s["n"], "a", 1, persistent = False )
Gaffer.Metadata.registerValue( s["n"]["op1"], "b", 2, persistent = False )
self.assertEqual( len( ncs ), 3 )
self.assertEqual( len( pcs ), 3 )
def testExactPreferredToWildcards( self ) :
class MetadataTestNodeD( Gaffer.Node ) :
def __init__( self, name = "MetadataTestNodeD" ) :
Gaffer.Node.__init__( self, name )
self["a"] = Gaffer.IntPlug()
self["b"] = Gaffer.IntPlug()
IECore.registerRunTimeTyped( MetadataTestNodeD )
Gaffer.Metadata.registerNode(
MetadataTestNodeD,
plugs = {
"*" : [
"test", "wildcard",
],
"a" :[
"test", "exact",
],
}
)
n = MetadataTestNodeD()
self.assertEqual( Gaffer.Metadata.value( n["a"], "test" ), "exact" )
self.assertEqual( Gaffer.Metadata.value( n["b"], "test" ), "wildcard" )
def testNoSerialiseAfterUndo( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
self.assertFalse( "test" in s.serialise() )
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.registerValue( s["n"], "test", 1 )
self.assertTrue( "test" in s.serialise() )
s.undo()
self.assertFalse( "test" in s.serialise() )
def testNoneMasksOthers( self ) :
n = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( n, "maskTest" ), None )
Gaffer.Metadata.registerValue( Gaffer.DependencyNode, "maskTest", 10 )
self.assertEqual( Gaffer.Metadata.value( n, "maskTest" ), 10 )
Gaffer.Metadata.registerValue( Gaffer.ComputeNode, "maskTest", None )
self.assertEqual( Gaffer.Metadata.value( n, "maskTest" ), None )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "maskTest", 20 )
self.assertEqual( Gaffer.Metadata.value( n, "maskTest" ), 20 )
Gaffer.Metadata.registerValue( n, "maskTest", 30 )
self.assertEqual( Gaffer.Metadata.value( n, "maskTest" ), 30 )
Gaffer.Metadata.registerValue( n, "maskTest", None )
self.assertEqual( Gaffer.Metadata.value( n, "maskTest" ), None )
def testDeregisterNodeValue( self ) :
n = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( n, "deleteMe" ), None )
Gaffer.Metadata.registerValue( Gaffer.Node, "deleteMe", 10 )
self.assertEqual( Gaffer.Metadata.value( n, "deleteMe" ), 10 )
Gaffer.Metadata.registerValue( Gaffer.ComputeNode, "deleteMe", 20 )
self.assertEqual( Gaffer.Metadata.value( n, "deleteMe" ), 20 )
Gaffer.Metadata.deregisterValue( Gaffer.ComputeNode, "deleteMe" )
self.assertEqual( Gaffer.Metadata.value( n, "deleteMe" ), 10 )
Gaffer.Metadata.deregisterValue( Gaffer.Node, "deleteMe" )
self.assertEqual( Gaffer.Metadata.value( n, "deleteMe" ), None )
def testDeregisterNodeInstanceValue( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), None )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "deleteMe", 10 )
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), 10 )
Gaffer.Metadata.registerValue( s["n"], "deleteMe", 20 )
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), 20 )
self.assertTrue( "Metadata" in s.serialise() )
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.deregisterValue( s["n"], "deleteMe" )
self.assertTrue( "deleteMe" not in s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), 10 )
s.undo()
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), 20 )
self.assertTrue( "deleteMe" in s.serialise() )
s.redo()
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), 10 )
self.assertTrue( "deleteMe" not in s.serialise() )
Gaffer.Metadata.deregisterValue( GafferTest.AddNode, "deleteMe" )
self.assertEqual( Gaffer.Metadata.value( s["n"], "deleteMe" ), None )
self.assertTrue( "deleteMe" not in s.serialise() )
def testDeregisterPlugValue( self ) :
n = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( n["op1"], "deleteMe" ), None )
Gaffer.Metadata.registerValue( Gaffer.Node, "op1", "deleteMe", 10 )
self.assertEqual( Gaffer.Metadata.value( n["op1"], "deleteMe" ), 10 )
Gaffer.Metadata.deregisterValue( Gaffer.Node, "op1", "deleteMe" )
self.assertEqual( Gaffer.Metadata.value( n["op1"], "deleteMe" ), None )
def testDeregisterPlugInstanceValue( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), None )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "deleteMe", 10 )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), 10 )
self.assertTrue( "deleteMe" not in s.serialise() )
Gaffer.Metadata.registerValue( s["n"]["op1"], "deleteMe", 20 )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), 20 )
self.assertTrue( "deleteMe" in s.serialise() )
with Gaffer.UndoScope( s ) :
Gaffer.Metadata.deregisterValue( s["n"]["op1"], "deleteMe" )
self.assertTrue( "deleteMe" not in s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), 10 )
s.undo()
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), 20 )
self.assertTrue( "deleteMe" in s.serialise() )
s.redo()
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), 10 )
self.assertTrue( "deleteMe" not in s.serialise() )
Gaffer.Metadata.deregisterValue( GafferTest.AddNode, "op1", "deleteMe" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["op1"], "deleteMe" ), None )
self.assertTrue( "deleteMe" not in s.serialise() )
def testComponentsWithMetaData( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n3"] = GafferTest.AddNode()
s["m"] = GafferTest.MultiplyNode()
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "nodeData1"), [] )
# register instance node values on n and n2:
Gaffer.Metadata.registerValue( s["n"], "nodeData1", "something" )
Gaffer.Metadata.registerValue( s["n2"], "nodeData2", "something" )
Gaffer.Metadata.registerValue( s["m"], "nodeData3", "something" )
Gaffer.Metadata.registerValue( s["n"], "nodeData3", "something" )
# register class value on GafferTest.AddNode:
Gaffer.Metadata.registerValue( GafferTest.AddNode, "nodeData3", "something" )
# register some instance plug values:
Gaffer.Metadata.registerValue( s["n"]["op1"], "plugData1", "something" )
Gaffer.Metadata.registerValue( s["n2"]["op2"], "plugData2", "something" )
Gaffer.Metadata.registerValue( s["m"]["op2"], "plugData3", "something" )
Gaffer.Metadata.registerValue( s["m"]["op1"], "plugData3", "something" )
# register class value on GafferTest.AddNode:
Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "plugData3", "somethingElse" )
# test it lists nodes with matching data:
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "nodeData1" ), [ s["n"] ] )
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "nodeData2" ), [ s["n2"] ] )
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "nodeData3" ), [ s["n"], s["n2"], s["n3"], s["m"] ] )
self.assertEqual( set(Gaffer.Metadata.nodesWithMetadata( s, "nodeData3", instanceOnly=True )), set([ s["n"], s["m"] ]) )
# telling it to list plugs should make it return an empty list:
self.assertEqual( Gaffer.Metadata.plugsWithMetadata( s, "nodeData1" ), [] )
self.assertEqual( Gaffer.Metadata.plugsWithMetadata( s, "nodeData3" ), [] )
# test it lists plugs with matching data:
self.assertEqual( Gaffer.Metadata.plugsWithMetadata( s, "plugData1" ), [ s["n"]["op1"] ] )
self.assertEqual( Gaffer.Metadata.plugsWithMetadata( s, "plugData2" ), [ s["n2"]["op2"] ] )
self.assertEqual( Gaffer.Metadata.plugsWithMetadata( s, "plugData3" ), [ s["n"]["op1"], s["n2"]["op1"], s["n3"]["op1"], s["m"]["op1"], s["m"]["op2"] ] )
self.assertEqual( set( Gaffer.Metadata.plugsWithMetadata( s, "plugData3", instanceOnly=True ) ), set( [ s["m"]["op1"], s["m"]["op2"] ] ) )
# telling it to list nodes should make it return an empty list:
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "plugData1" ), [] )
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "plugData3" ), [] )
# test child removal:
m = s["m"]
s.removeChild( m )
self.assertEqual( Gaffer.Metadata.plugsWithMetadata( s, "plugData3", instanceOnly=True ), [] )
self.assertEqual( Gaffer.Metadata.nodesWithMetadata( s, "nodeData3", instanceOnly=True ), [ s["n"] ] )
def testNonNodeMetadata( self ) :
cs = GafferTest.CapturingSlot( Gaffer.Metadata.valueChangedSignal() )
self.assertEqual( len( cs ), 0 )
Gaffer.Metadata.registerValue( "testTarget", "testInt", 1 )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testInt" ), 1 )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( "testTarget", "testInt" ) )
intVectorData = IECore.IntVectorData( [ 1, 2 ] )
Gaffer.Metadata.registerValue( "testTarget", "testIntVectorData", intVectorData )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testIntVectorData" ), intVectorData )
self.assertFalse( Gaffer.Metadata.value( "testTarget", "testIntVectorData" ).isSame( intVectorData ) )
self.assertEqual( len( cs ), 2 )
self.assertEqual( cs[1], ( "testTarget", "testIntVectorData" ) )
Gaffer.Metadata.registerValue( "testTarget", "testDynamicValue", lambda : 20 )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testDynamicValue" ), 20 )
self.assertEqual( len( cs ), 3 )
self.assertEqual( cs[2], ( "testTarget", "testDynamicValue" ) )
names = Gaffer.Metadata.registeredValues( "testTarget" )
self.assertTrue( "testInt" in names )
self.assertTrue( "testIntVectorData" in names )
self.assertTrue( "testDynamicValue" in names )
self.assertTrue( names.index( "testInt" ) < names.index( "testIntVectorData" ) )
self.assertTrue( names.index( "testIntVectorData" ) < names.index( "testDynamicValue" ) )
def testOverwriteNonNodeMetadata( self ) :
cs = GafferTest.CapturingSlot( Gaffer.Metadata.valueChangedSignal() )
Gaffer.Metadata.registerValue( "testTarget", "testInt", 1 )
self.assertEqual( len( cs ), 1 )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testInt" ), 1 )
Gaffer.Metadata.registerValue( "testTarget", "testInt", 2 )
self.assertEqual( len( cs ), 2 )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testInt" ), 2 )
def testDeregisterNonNodeMetadata( self ) :
Gaffer.Metadata.registerValue( "testTarget", "testInt", 1 )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testInt" ), 1 )
cs = GafferTest.CapturingSlot( Gaffer.Metadata.valueChangedSignal() )
Gaffer.Metadata.deregisterValue( "testTarget", "testInt" )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( "testTarget", "testInt" ) )
self.assertEqual( Gaffer.Metadata.value( "testTarget", "testInt" ), None )
Gaffer.Metadata.deregisterValue( "testTarget", "nonExistentKey" )
self.assertEqual( len( cs ), 1 )
Gaffer.Metadata.deregisterValue( "nonExistentTarget", "testInt" )
self.assertEqual( len( cs ), 1 )
def testSerialisationQuoting( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
needsQuoting = """'"\n\\'!"""
Gaffer.Metadata.registerValue( s["n"], "test", needsQuoting )
Gaffer.Metadata.registerValue( s["n"], needsQuoting, "test" )
Gaffer.Metadata.registerValue( s["n"]["p"], "test", needsQuoting )
Gaffer.Metadata.registerValue( s["n"]["p"], needsQuoting, "test" )
self.assertEqual( Gaffer.Metadata.value( s["n"], "test" ), needsQuoting )
self.assertEqual( Gaffer.Metadata.value( s["n"], needsQuoting ), "test" )
self.assertEqual( Gaffer.Metadata.value( s["n"]["p"], "test" ), needsQuoting )
self.assertEqual( Gaffer.Metadata.value( s["n"]["p"], needsQuoting ), "test" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s2["n"], "test" ), needsQuoting )
self.assertEqual( Gaffer.Metadata.value( s2["n"], needsQuoting ), "test" )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["p"], "test" ), needsQuoting )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["p"], needsQuoting ), "test" )
def testSerialisationOnlyUsesDataWhenNecessary( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
for value in [
"s",
1,
2.0,
True,
imath.Color3f( 0 ),
imath.V2f( 0 ),
imath.V2i( 0 ),
imath.V3i( 0 ),
IECore.StringVectorData( [ "one", "two" ] ),
IECore.IntVectorData( [ 1, 2, 3 ] ),
] :
Gaffer.Metadata.registerValue( s["n"], "test", value )
Gaffer.Metadata.registerValue( s["n"]["p"], "test", value )
self.assertEqual( Gaffer.Metadata.value( s["n"], "test" ), value )
self.assertEqual( Gaffer.Metadata.value( s["n"]["p"], "test" ), value )
ss = s.serialise()
if not isinstance( value, IECore.Data ) :
self.assertTrue( "Data" not in ss )
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertEqual( Gaffer.Metadata.value( s2["n"], "test" ), value )
self.assertEqual( Gaffer.Metadata.value( s2["n"]["p"], "test" ), value )
def testOverloadedMethods( self ) :
n = GafferTest.AddNode()
Gaffer.Metadata.registerValue( n, "one", 1 )
Gaffer.Metadata.registerValue( n["op1"], "two", 2 )
self.assertEqual( Gaffer.Metadata.registeredValues( n, instanceOnly = True ), [ "one" ] )
self.assertEqual( Gaffer.Metadata.registeredValues( n["op1"], instanceOnly = True ), [ "two" ] )
self.assertEqual( Gaffer.Metadata.value( n, "one" ), 1 )
self.assertEqual( Gaffer.Metadata.value( n["op1"], "two" ), 2 )
Gaffer.Metadata.deregisterValue( n, "one" )
Gaffer.Metadata.deregisterValue( n["op1"], "two" )
self.assertEqual( Gaffer.Metadata.registeredValues( n, instanceOnly = True ), [] )
self.assertEqual( Gaffer.Metadata.registeredValues( n["op1"], instanceOnly = True ), [] )
@staticmethod
def testPythonUnload() :
subprocess.check_call( [ "gaffer", "python", os.path.dirname( __file__ ) + "/pythonScripts/unloadExceptionScript.py" ] )
def testWildcardsAndDot( self ) :
class MetadataTestNodeE( Gaffer.Node ) :
def __init__( self, name = "MetadataTestNodeE" ) :
Gaffer.Node.__init__( self, name )
self["p"] = Gaffer.Plug()
self["p"]["a"] = Gaffer.IntPlug()
self["p"]["b"] = Gaffer.Plug()
self["p"]["b"]["c"] = Gaffer.IntPlug()
self["p"]["b"]["d"] = Gaffer.IntPlug()
IECore.registerRunTimeTyped( MetadataTestNodeE )
Gaffer.Metadata.registerNode(
MetadataTestNodeE,
# '*' should not match '.', and `...` should
# match any number of components, including 0.
plugs = {
"*" : [
"root", True,
],
"*.a" : [
"a", True,
],
"*.b" : [
"b", True,
],
"*.b.*" : [
"cd", True,
],
"...c" : [
"c", True,
],
"p...d" : [
"d", True,
],
"...[cd]" : [
"cd2", True,
],
"..." : [
"all", True,
],
"p.b..." : [
"allB", True,
],
}
)
n = MetadataTestNodeE()
allPlugs = { n["p"], n["p"]["a"], n["p"]["b"], n["p"]["b"]["c"], n["p"]["b"]["d"] }
for key, plugs in (
( "root", { n["p"] } ),
( "a", { n["p"]["a"] } ),
( "b", { n["p"]["b"] } ),
( "cd", { n["p"]["b"]["c"], n["p"]["b"]["d"] } ),
( "c", { n["p"]["b"]["c"] } ),
( "d", { n["p"]["b"]["d"] } ),
( "cd2", { n["p"]["b"]["c"], n["p"]["b"]["d"] } ),
( "all", allPlugs ),
( "allB", { n["p"]["b"], n["p"]["b"]["c"], n["p"]["b"]["d"] } )
) :
for plug in allPlugs :
if plug in plugs :
self.assertEqual( Gaffer.Metadata.value( plug, key ), True )
self.assertIn( key, Gaffer.Metadata.registeredValues( plug ) )
else :
self.assertEqual( Gaffer.Metadata.value( plug, key ), None )
self.assertNotIn( key, Gaffer.Metadata.registeredValues( plug ) )
def testCantPassNoneForGraphComponent( self ) :
with six.assertRaisesRegex( self, Exception, "Python argument types" ) :
Gaffer.Metadata.registerValue( None, "test", "test" )
def testCallSignalDirectly( self ) :
cs1 = GafferTest.CapturingSlot( Gaffer.Metadata.valueChangedSignal() )
cs2 = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
cs3 = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
Gaffer.Metadata.valueChangedSignal()( "target", "key" )
Gaffer.Metadata.plugValueChangedSignal()( Gaffer.Node, "*", "key", None )
Gaffer.Metadata.nodeValueChangedSignal()( Gaffer.Node, "key", None )
self.assertEqual( cs1, [ ( "target", "key" ) ] )
self.assertEqual( cs2, [ ( Gaffer.Node.staticTypeId(), "*", "key", None ) ] )
self.assertEqual( cs3, [ ( Gaffer.Node.staticTypeId(), "key", None ) ] )
def testRegisterPlugTypeMetadata( self ) :
Gaffer.Metadata.registerValue( Gaffer.Color3fPlug, "testKey", "testValue" )
Gaffer.Metadata.registerValue( Gaffer.Color3fPlug, "testKey2", lambda plug : plug.staticTypeName() )
p1 = Gaffer.Color3fPlug()
p2 = Gaffer.FloatPlug()
self.assertEqual( Gaffer.Metadata.value( p1, "testKey" ), "testValue" )
self.assertEqual( Gaffer.Metadata.value( p2, "testKey" ), None )
self.assertEqual( Gaffer.Metadata.value( p1, "testKey2" ), "Gaffer::Color3fPlug" )
self.assertEqual( Gaffer.Metadata.value( p2, "testKey2" ), None )
def testPlugTypeMetadataEmitsPlugValueChangedSignal( self ) :
nodeChanges = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
plugChanges = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerValue( Gaffer.FloatPlug, "testChanges", 10 )
self.assertEqual( len( nodeChanges ), 0 )
self.assertEqual( plugChanges, [ ( Gaffer.FloatPlug.staticTypeId(), "", "testChanges", None ) ] )
del plugChanges[:]
Gaffer.Metadata.deregisterValue( Gaffer.FloatPlug, "testChanges" )
self.assertEqual( len( nodeChanges ), 0 )
self.assertEqual( plugChanges, [ ( Gaffer.FloatPlug.staticTypeId(), "", "testChanges", None ) ] )
def testPlugTypeMetadataInRegisteredValues( self ) :
n = GafferTest.AddNode()
Gaffer.Metadata.registerValue( Gaffer.IntPlug, "typeRegistration", 10 )
self.assertIn( "typeRegistration", Gaffer.Metadata.registeredValues( n["op1"] ) )
Gaffer.Metadata.deregisterValue( Gaffer.IntPlug, "typeRegistration" )
self.assertNotIn( "typeRegistration", Gaffer.Metadata.registeredValues( n["op1"] ) )
def testMetadataRelativeToAncestorPlug( self ) :
n = Gaffer.Node()
n["user"]["p"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( Gaffer.Metadata.value( n["user"]["p"]["r"], "testPlugAncestor" ), None )
self.assertNotIn( "testPlugAncestor", Gaffer.Metadata.registeredValues( n["user"]["p"]["r"] ) )
Gaffer.Metadata.registerValue( Gaffer.Color3fPlug, "r", "testPlugAncestor", 10 )
self.assertEqual( Gaffer.Metadata.value( n["user"]["p"]["r"], "testPlugAncestor" ), 10 )
self.assertIn( "testPlugAncestor", Gaffer.Metadata.registeredValues( n["user"]["p"]["r"] ) )
def testValueFromNoneRaises( self ) :
with six.assertRaisesRegex( self, Exception, r"did not match C\+\+ signature" ) :
Gaffer.Metadata.value( None, "test" )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -7,004,529,297,010,446,000 | 36.115681 | 154 | 0.674309 | false |
Keeper-Security/Commander | keepercommander/custom/create_delete.py | 1 | 1159 | # _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2017 Keeper Security Inc.
# Contact: [email protected]
#
# Example showing how to create a record and upload
# to the server, then deleting the record from the
# server.
#
import getpass
import string
import random
from keepercommander.record import Record
from keepercommander.params import KeeperParams
from keepercommander import display, api
my_params = KeeperParams()
while not my_params.user:
my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)
while not my_params.password:
my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)
api.sync_down(my_params)
# Add record
r = Record()
r.title = 'Test Record'
r.login = '[email protected]'
# generate a 32-char random password
r.password = ''.join(random.SystemRandom().choice(string.printable) for _ in range(32))
if api.add_record(my_params, r):
print('Added record UID='+r.record_uid)
# Delete the record
if r.record_uid:
api.delete_record(my_params, r.record_uid)
| mit | -5,221,725,862,456,658,000 | 22.632653 | 88 | 0.66494 | false |
baharev/sdopt-tearing | blt_decomp.py | 1 | 3754 | # Copyright (C) 2014, 2015 University of Vienna
# All rights reserved.
# BSD license.
# Author: Ali Baharev <[email protected]>
from __future__ import print_function
import networkx as nx
from networkx.algorithms.bipartite import is_bipartite_node_set, projected_graph
from plot import dummy as plot
__all__ = [ 'Dulmage_Mendelsohn' ]
#log = print
def log(*args, **kwargs): pass
# When the real plot is used from plot, the order of the plots is as follows:
# (1) The test problem, (2) the bipartite graph after matching,
# (3) the condensed graph (the SCCs of the equations), and (4) the SCC subgraph
# with all nodes that are connected to this SCC (under/over-determined blocks).
def Dulmage_Mendelsohn(g, eqs):
'''The input graph g is assumed to be a bipartite graph with no isolated
nodes. Returns the diagonal blocks as a list of (equations, variables).'''
assert_all_in_graph(g, eqs)
assert_no_isolates(g)
assert eqs, 'At least one equation is expected'
assert is_bipartite_node_set(g, eqs)
# Maximum matching
mate = nx.max_weight_matching(g, maxcardinality=True)
matches = sorted((k,mate[k]) for k in mate if k in eqs)
log('Matches:')
for eq, var in matches:
log(eq, var)
# Direct the edges of g according to the matching
bipart = to_digraph(g, matches)
plot(bipart)
# Find the strongly connected components (SCCs) of the equations
eq_sccs = nx.condensation( projected_graph(bipart, eqs) )
plot(eq_sccs)
# Q: With proper implementation, shouldn't the SCCs be already top. sorted?
precedence_order = nx.topological_sort(eq_sccs)
# Collect the diagonal blocks as a list of (equations, variables)
diagonal_blocks = [ ]
seen = set()
for scc in precedence_order:
equations = eq_sccs.node[scc]['members']
variables = {n for eq in equations for n in g.edge[eq] if n not in seen}
seen.update(variables)
diagonal_blocks.append( (equations,list(variables)) )
return diagonal_blocks
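# A minimal usage sketch (node names are illustrative, not from this module):
# two equations and two variables, where eq1 couples x and y and eq2 only
# involves y.
def _example_dulmage_mendelsohn():
    g = nx.Graph()
    g.add_edges_from([('eq1', 'x'), ('eq1', 'y'), ('eq2', 'y')])
    # returns the diagonal blocks as (equations, variables) in precedence order
    return Dulmage_Mendelsohn(g, ['eq1', 'eq2'])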
def to_digraph(g, eq_var_matches):
bipart = nx.DiGraph()
for eq, var in eq_var_matches:
# eq -> var
bipart.add_edge(eq, var)
# eq <- var_k (dependencies of eq other than var)
deps = [n for n in g.edge[eq] if n!=var]
for var_k in deps:
bipart.add_edge(var_k, eq)
# eq_k <- var (other equations involving var)
substitute = [(var, n) for n in g.edge[var] if n!=eq]
bipart.add_edges_from(substitute)
# If there are no isolates in g, the above loop should have added all
# unmatched equations or variables as well
missed_nodes = [n for n in g if n not in bipart]
assert not missed_nodes, 'Failed to copy nodes: {}'.format(missed_nodes)
return bipart
def assert_all_in_graph(g, eqs):
missing = [ eq for eq in eqs if eq not in g ]
assert not missing, 'equations not in the input graph: {}'.format(missing)
def assert_no_isolates(g):
isolated_nodes = nx.isolates(g)
assert not isolated_nodes, isolated_nodes
#-------------------------------------------------------------------------------
def run_tests():
from test_tearing import gen_testproblems
for g, eqs, _ in gen_testproblems():
plot(g)
diagonal_blocks = Dulmage_Mendelsohn(g, eqs)
print('Equation SCCs in topologically sorted order:')
for equations, variables in diagonal_blocks:
equations, variables = sorted(equations), sorted(variables)
print('-----------------------------------')
print('Equations: ', equations)
print('New variables:', variables)
plot( g.subgraph(equations+variables) )
if __name__ == '__main__':
run_tests()
| bsd-3-clause | 8,460,373,804,732,289,000 | 39.365591 | 80 | 0.636388 | false |
benspaulding/django-faq | docs/conf.py | 1 | 7409 | # -*- coding: utf-8 -*-
#
# django-faq documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 17 13:09:21 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/latest/', None),
'django': ('http://django.readthedocs.org/en/latest/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
}
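# With the mapping above, py-domain cross-references in these docs, such as
# :class:`django.db.models.Model`, resolve against the linked manuals.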
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-faq'
copyright = u'2012, Ben Spaulding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8'
# The full version, including alpha/beta/rc tags.
release = '0.8.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-faqdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-faq.tex', u'django-faq Documentation',
u'Ben Spaulding', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-faq', u'django-faq Documentation',
[u'Ben Spaulding'], 1)
]
| bsd-3-clause | 5,231,834,295,907,184,000 | 31.783186 | 80 | 0.706978 | false |
dadorado37/Trabajos_Python | empresa_arranque_gana.py | 1 | 7489 | # este link es para asegurarse de que un usuario ingrese un numero entero y no un caracter
# https://mail.python.org/pipermail/python-es/2011-September/030635.html
# empresa_arranque_gana.py
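# A minimal sketch of the integer-validation pattern from the link above;
# the helper name and prompt argument are illustrative and not used by the
# original script:
def leer_entero(mensaje):
    # keep asking until raw_input() returns only digits, then convert
    while 1:
        valor = raw_input(mensaje)
        if valor.isdigit():
            return int(valor)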
seleccion_menu_uno = 5
# define the function blocks
def uno():
print("\nEsa opcion es correcta\n")
print "cliente categoria 1"
print "nombre del cliente: ", nombre_cliente
print "la cantidad de escobas es: ", cantidad_escobas
print "el costo total de las escobas es: ", costo_total_escobas
print "la cantidad de recogedores es: ", cantidad_recogedores
print "el costo total de los recogedores es: ", costo_total_recogedores
print "la cantidad de aromatizantes es: ", cantidad_aromatizantes
print "el costo total de los aromatizantes es: ", costo_total_aromatizantes
print "la cantidad total de productos es: ", cantidad_total_productos
print "el subtotal de la compra es: ", subtotal_compra
descuento_compra = (subtotal_compra * 5) / 100
total_compra = subtotal_compra - descuento_compra
print "el total de compra es: ", total_compra
def dos():
print("\nEsa opcion es correcta\n")
print "cliente categoria 2"
print "nombre del cliente: ", nombre_cliente
print "la cantidad de escobas es: ", cantidad_escobas
print "el costo total de las escobas es: ", costo_total_escobas
print "la cantidad de recogedores es: ", cantidad_recogedores
print "el costo total de los recogedores es: ", costo_total_recogedores
print "la cantidad de aromatizantes es: ", cantidad_aromatizantes
print "el costo total de los aromatizantes es: ", costo_total_aromatizantes
print "la cantidad total de productos es: ", cantidad_total_productos
print "el subtotal de la compra es: ", subtotal_compra
descuento_compra = (subtotal_compra * 8) / 100
total_compra = subtotal_compra - descuento_compra
print "el total de compra es: ", total_compra
def tres():
print("\nEsa opcion es correcta\n")
print "cliente categoria 3"
print "nombre del cliente: ", nombre_cliente
print "la cantidad de escobas es: ", cantidad_escobas
print "el costo total de las escobas es: ", costo_total_escobas
print "la cantidad de recogedores es: ", cantidad_recogedores
print "el costo total de los recogedores es: ", costo_total_recogedores
print "la cantidad de aromatizantes es: ", cantidad_aromatizantes
print "el costo total de los aromatizantes es: ", costo_total_aromatizantes
print "la cantidad total de productos es: ", cantidad_total_productos
print "el subtotal de la compra es: ", subtotal_compra
descuento_compra = (subtotal_compra * 12) / 100
total_compra = subtotal_compra - descuento_compra
print "el total de compra es: ", total_compra
def cuatro():
print("\nEsa opcion es correcta\n")
print "cliente categoria 4"
print "nombre del cliente: ", nombre_cliente
print "la cantidad de escobas es: ", cantidad_escobas
print "el costo total de las escobas es: ", costo_total_escobas
print "la cantidad de recogedores es: ", cantidad_recogedores
print "el costo total de los recogedores es: ", costo_total_recogedores
print "la cantidad de aromatizantes es: ", cantidad_aromatizantes
print "el costo total de los aromatizantes es: ", costo_total_aromatizantes
print "la cantidad total de productos es: ", cantidad_total_productos
print "el subtotal de la compra es: ", subtotal_compra
descuento_compra = (subtotal_compra * 15) / 100
total_compra = subtotal_compra - descuento_compra
print "el total de compra es: ", total_compra
costo_escoba = 5000
costo_recogedor = 2000
costo_aromatizante = 3000
print "cual es su nombre"
nombre_cliente = raw_input()
print "\ndesea comprar escobas S / N "
desea_comprar_escobas = raw_input()
# the original chained `or` tests were always true, so any input was accepted;
# a membership test expresses the intended S/N validation
while desea_comprar_escobas not in ('s', 'S', 'n', 'N'):
    print "\ningreso la opcion incorrecta"
    print "desea comprar escobas S / N "
    desea_comprar_escobas = raw_input()
print "\nok ingreso la opcion correcta"
print "\ndesea comprar recogedores S / N "
desea_comprar_recogedores = raw_input()
while desea_comprar_recogedores not in ('s', 'S', 'n', 'N'):
    print "\ningreso la opcion incorrecta"
    print "desea comprar recogedores S / N "
    desea_comprar_recogedores = raw_input()
print "\nok ingreso la opcion correcta"
print "\ndesea comprar aromatizantes S / N "
desea_comprar_aromatizantes = raw_input()
while desea_comprar_aromatizantes not in ('s', 'S', 'n', 'N'):
    print "\ningreso la opcion incorrecta"
    print "desea comprar aromatizantes S / N "
    desea_comprar_aromatizantes = raw_input()
print "\nok ingreso la opcion correcta\n"
if (desea_comprar_escobas == 's') or (desea_comprar_escobas == 'S'):
while 1:
print "digite la cantidad de escobas"
cantidad_escobas = raw_input()
if cantidad_escobas.isdigit():
cantidad_escobas = int(cantidad_escobas)
break
elif (desea_comprar_escobas == 'n') or (desea_comprar_escobas == 'N'):
cantidad_escobas = 0
if (desea_comprar_recogedores == 's')or(desea_comprar_recogedores == 'S'):
while 1:
print "digite la cantidad de recogedores"
cantidad_recogedores = raw_input()
if cantidad_recogedores.isdigit():
cantidad_recogedores = int(cantidad_recogedores)
break
elif (desea_comprar_recogedores == 'n') or (desea_comprar_recogedores == 'N'):
cantidad_recogedores = 0
if (desea_comprar_aromatizantes == 's') or (desea_comprar_aromatizantes == 'S'):
while 1:
print "digite la cantidad de aromatizantes"
cantidad_aromatizantes = raw_input()
if cantidad_aromatizantes.isdigit():
cantidad_aromatizantes = int(cantidad_aromatizantes)
break
elif (desea_comprar_aromatizantes == 'n') or (desea_comprar_aromatizantes == 'N'):
cantidad_aromatizantes = 0
costo_total_escobas = costo_escoba * cantidad_escobas
costo_total_recogedores = costo_recogedor * cantidad_recogedores
costo_total_aromatizantes = costo_aromatizante * cantidad_aromatizantes
cantidad_total_productos = cantidad_escobas + cantidad_recogedores + cantidad_aromatizantes
subtotal_compra = costo_total_escobas + costo_total_recogedores + costo_total_aromatizantes
while seleccion_menu_uno > 4:
# map the inputs to the function blocks
menu_uno = {1 : uno,
2 : dos,
3 : tres,
4 : cuatro,}
while 1:
print("opcion 1.- cliente categoria 1 se le descuenta el 5%")
print("opcion 2.- cliente categoria 2 se le descuenta el 8%")
print("opcion 3.- cliente categoria 3 se le descuenta el 12%")
print("opcion 4.- cliente categoria 4 se le descuenta el 15%")
print "\nescoja una opcion\n\n"
seleccion_menu_uno = raw_input()
if seleccion_menu_uno.isdigit():
seleccion_menu_uno = int(seleccion_menu_uno)
            try:
                menu_uno[seleccion_menu_uno]()
            except KeyError:
                # only an unknown menu number is a bad option; other errors
                # from the handlers should propagate
                print("\nEsa opcion no es correcta")
break
else:
print("\nEsa opcion no es correcta") | gpl-2.0 | -7,989,632,861,453,903,000 | 40.153846 | 159 | 0.72533 | false |
Hattivat/hypergolic-django | hypergolic/catalog/views/guidance_system_views.py | 1 | 1876 | from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView, DeleteView
from .base import GenericListView, GenericCreateView
from ..models import GuidanceSystem
from ..forms import GuidanceSystemForm
from django.core.urlresolvers import reverse, reverse_lazy
class GuidanceSystemListView(GenericListView):
model = GuidanceSystem
display_data = ('energy_consumption', 'description', 'illustration')
class GuidanceSystemDetailView(DetailView):
model = GuidanceSystem
template_name = "catalog/electric_detail.html"
class GuidanceSystemCreateView(GenericCreateView):
model = GuidanceSystem
form_class = GuidanceSystemForm
# fields = ['name', 'description', 'sources', 'illustration']
success_url = reverse_lazy("guidance_system_list")
def form_valid(self, form):
obj = form.save(commit=False)
obj.creator = self.request.user
obj.save()
return super(GuidanceSystemCreateView, self).form_valid(form)
def get_success_url(self):
return reverse("guidance_system_detail", args=(self.object.pk,))
class GuidanceSystemUpdateView(UpdateView):
model = GuidanceSystem
form_class = GuidanceSystemForm
# fields = ['name', 'description', 'sources', 'illustration']
template_name = "catalog/generic_update.html"
initial = {}
def form_valid(self, form):
obj = form.save(commit=False)
obj.modifier = self.request.user
obj.save()
return super(GuidanceSystemUpdateView, self).form_valid(form)
def get_success_url(self):
return reverse("guidance_system_detail", args=(self.object.pk,))
class GuidanceSystemDeleteView(DeleteView):
model = GuidanceSystem
template_name = "catalog/generic_delete.html"
success_url = reverse_lazy("guidance_system_list")
| agpl-3.0 | -2,839,638,340,755,405,300 | 32.5 | 72 | 0.723348 | false |
praekelt/jmbo-foundry | foundry/migrations/0008_auto.py | 1 | 23410 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field content_type on 'Listing'
db.create_table('foundry_listing_content_type', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('listing', models.ForeignKey(orm['foundry.listing'], null=False)),
('contenttype', models.ForeignKey(orm['contenttypes.contenttype'], null=False))
))
db.create_unique('foundry_listing_content_type', ['listing_id', 'contenttype_id'])
def backwards(self, orm):
# Removing M2M table for field content_type on 'Listing'
db.delete_table('foundry_listing_content_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foundry.blogpost': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BlogPost', '_ormbases': ['jmbo.ModelBase']},
'content': ('ckeditor.fields.RichTextField', [], {}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.chatroom': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ChatRoom', '_ormbases': ['jmbo.ModelBase']},
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.column': {
'Meta': {'object_name': 'Column'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Row']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'})
},
'foundry.country': {
'Meta': {'ordering': "('title',)", 'object_name': 'Country'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minimum_age': ('django.db.models.fields.PositiveIntegerField', [], {'default': '18'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'foundry.defaultavatar': {
'Meta': {'object_name': 'DefaultAvatar'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultavatar_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.foundrycomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'FoundryComment', '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']", 'null': 'True', 'blank': 'True'})
},
'foundry.link': {
'Meta': {'object_name': 'Link'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'foundry.listing': {
'Meta': {'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {}),
'display_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'foundry.member': {
'Meta': {'object_name': 'Member', '_ormbases': ['auth.User']},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.menu': {
'Meta': {'object_name': 'Menu'},
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.menulinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'MenuLinkPosition'},
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Menu']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.navbar': {
'Meta': {'object_name': 'Navbar'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.navbarlinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'NavbarLinkPosition'},
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'navbar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Navbar']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.row': {
'Meta': {'object_name': 'Row'},
'block_name': ('django.db.models.fields.CharField', [], {'default': "'content'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"})
},
'foundry.tile': {
'Meta': {'object_name': 'Tile'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Column']"}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_ajax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tile_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.59999999999999998'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foundry']
| bsd-3-clause | 8,287,324,540,936,429,000 | 80.567944 | 195 | 0.547117 | false |
cpennington/edx-platform | common/lib/xmodule/xmodule/library_content_module.py | 1 | 28855 | # -*- coding: utf-8 -*-
"""
LibraryContent: The XBlock used to include blocks from a library in a course.
"""
import json
import logging
import random
from copy import copy
from gettext import ngettext
from pkg_resources import resource_string
import six
from capa.responsetypes import registry
from lazy import lazy
from lxml import etree
from opaque_keys.edx.locator import LibraryLocator
from six import text_type
from six.moves import zip
from web_fragments.fragment import Fragment
from webob import Response
from xblock.core import XBlock
from xblock.fields import Integer, List, Scope, String
from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW, XModule
from .mako_module import MakoModuleDescriptor
from .xml_module import XmlDescriptor
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
logger = logging.getLogger(__name__)
ANY_CAPA_TYPE_VALUE = 'any'
def _get_human_name(problem_class):
"""
Get the human-friendly name for a problem type.
"""
return getattr(problem_class, 'human_name', problem_class.__name__)
def _get_capa_types():
"""
Gets capa types tags and labels
"""
capa_types = {tag: _get_human_name(registry.get_class_for_tag(tag)) for tag in registry.registered_tags()}
return [{'value': ANY_CAPA_TYPE_VALUE, 'display_name': _('Any Type')}] + sorted([
{'value': capa_type, 'display_name': caption}
for capa_type, caption in capa_types.items()
], key=lambda item: item.get('display_name'))
class LibraryContentFields(object):
"""
Fields for the LibraryContentModule.
Separated out for now because they need to be added to the module and the
descriptor.
"""
# Please note the display_name of each field below is used in
# common/test/acceptance/pages/studio/library.py:StudioLibraryContentXBlockEditModal
# to locate input elements - keep synchronized
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
default="Randomized Content Block",
scope=Scope.settings,
)
source_library_id = String(
display_name=_("Library"),
help=_("Select the library from which you want to draw content."),
scope=Scope.settings,
values_provider=lambda instance: instance.source_library_values(),
)
source_library_version = String(
# This is a hidden field that stores the version of source_library when we last pulled content from it
display_name=_("Library Version"),
scope=Scope.settings,
)
mode = String(
display_name=_("Mode"),
help=_("Determines how content is drawn from the library"),
default="random",
values=[
{"display_name": _("Choose n at random"), "value": "random"}
# Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
# Future addition: manually selected blocks
],
scope=Scope.settings,
)
max_count = Integer(
display_name=_("Count"),
help=_("Enter the number of components to display to each student."),
default=1,
scope=Scope.settings,
)
capa_type = String(
display_name=_("Problem Type"),
help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
default=ANY_CAPA_TYPE_VALUE,
values=_get_capa_types(),
scope=Scope.settings,
)
selected = List(
# This is a list of (block_type, block_id) tuples used to record
# which random/first set of matching blocks was selected per user
default=[],
scope=Scope.user_state,
)
has_children = True
@property
def source_library_key(self):
"""
Convenience method to get the library ID as a LibraryLocator and not just a string
"""
return LibraryLocator.from_string(self.source_library_id)
#pylint: disable=abstract-method
@XBlock.wants('library_tools') # Only needed in studio
class LibraryContentModule(LibraryContentFields, XModule, StudioEditableModule):
"""
An XBlock whose children are chosen dynamically from a content library.
Can be used to create randomized assessments among other things.
Note: technically, all matching blocks from the content library are added
as children of this block, but only a subset of those children are shown to
any particular student.
"""
@classmethod
def make_selection(cls, selected, children, max_count, mode):
"""
Dynamically selects block_ids indicating which of the possible children are displayed to the current user.
Arguments:
selected - list of (block_type, block_id) tuples assigned to this student
children - children of this block
max_count - number of components to display to each student
mode - how content is drawn from the library
Returns:
A dict containing the following keys:
'selected' (set) of (block_type, block_id) tuples assigned to this student
'invalid' (set) of dropped (block_type, block_id) tuples that are no longer valid
'overlimit' (set) of dropped (block_type, block_id) tuples that were previously selected
'added' (set) of newly added (block_type, block_id) tuples
"""
rand = random.Random()
selected = set(tuple(k) for k in selected) # set of (block_type, block_id) tuples assigned to this student
# Determine which of our children we will show:
valid_block_keys = set([(c.block_type, c.block_id) for c in children])
# Remove any selected blocks that are no longer valid:
invalid_block_keys = (selected - valid_block_keys)
if invalid_block_keys:
selected -= invalid_block_keys
# If max_count has been decreased, we may have to drop some previously selected blocks:
overlimit_block_keys = set()
if len(selected) > max_count:
num_to_remove = len(selected) - max_count
overlimit_block_keys = set(rand.sample(selected, num_to_remove))
selected -= overlimit_block_keys
# Do we have enough blocks now?
num_to_add = max_count - len(selected)
added_block_keys = None
if num_to_add > 0:
# We need to select [more] blocks to display to this user:
pool = valid_block_keys - selected
if mode == "random":
num_to_add = min(len(pool), num_to_add)
added_block_keys = set(rand.sample(pool, num_to_add))
# We now have the correct n random children to show for this user.
else:
raise NotImplementedError("Unsupported mode.")
selected |= added_block_keys
return {
'selected': selected,
'invalid': invalid_block_keys,
'overlimit': overlimit_block_keys,
'added': added_block_keys,
}
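    # Worked example with hypothetical keys: given selected=[("html", "a")],
    # children covering ("html", "a") and ("problem", "b"), max_count=2 and
    # mode="random", the method keeps ("html", "a"), adds ("problem", "b"),
    # and returns 'invalid' and 'overlimit' as empty sets.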
def _publish_event(self, event_name, result, **kwargs):
"""
Helper method to publish an event for analytics purposes
"""
event_data = {
"location": six.text_type(self.location),
"result": result,
"previous_count": getattr(self, "_last_event_result_count", len(self.selected)),
"max_count": self.max_count,
}
event_data.update(kwargs)
self.runtime.publish(self, "edx.librarycontentblock.content.{}".format(event_name), event_data)
self._last_event_result_count = len(result) # pylint: disable=attribute-defined-outside-init
@classmethod
def publish_selected_children_events(cls, block_keys, format_block_keys, publish_event):
"""
Helper method for publishing events when children blocks are
selected/updated for a user. This helper is also used by
the ContentLibraryTransformer.
Arguments:
block_keys -
A dict describing which events to publish (add or
remove), see `make_selection` above for format details.
format_block_keys -
A function to convert block keys to the format expected
by publish_event. Must have the signature:
[(block_type, block_id)] -> T
Where T is a collection of block keys as accepted by
`publish_event`.
publish_event -
Function that handles the actual publishing. Must have
the signature:
<'removed'|'assigned'> -> result:T -> removed:T -> reason:str -> None
Where T is a collection of block_keys as returned by
`format_block_keys`.
"""
if block_keys['invalid']:
# reason "invalid" means deleted from library or a different library is now being used.
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['invalid']),
reason="invalid"
)
if block_keys['overlimit']:
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['overlimit']),
reason="overlimit"
)
if block_keys['added']:
publish_event(
"assigned",
result=format_block_keys(block_keys['selected']),
added=format_block_keys(block_keys['added'])
)
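    # A sketch of a conforming `publish_event` callable (the emitter shown is
    # hypothetical; any callable with this shape works):
    #
    #     def publish_event(event_name, result, **kwargs):
    #         data = dict(result=result, **kwargs)
    #         emit("edx.librarycontentblock.content." + event_name, data)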
def selected_children(self):
"""
Returns a set() of block_ids indicating which of the possible children
have been selected to display to the current user.
This reads and updates the "selected" field, which has user_state scope.
Note: self.selected and the return value contain block_ids. To get
actual BlockUsageLocators, it is necessary to use self.children,
because the block_ids alone do not specify the block type.
"""
if hasattr(self, "_selected_set"):
# Already done:
return self._selected_set # pylint: disable=access-member-before-definition
block_keys = self.make_selection(self.selected, self.children, self.max_count, "random") # pylint: disable=no-member
# Publish events for analytics purposes:
lib_tools = self.runtime.service(self, 'library_tools')
format_block_keys = lambda keys: lib_tools.create_block_analytics_summary(self.location.course_key, keys)
self.publish_selected_children_events(
block_keys,
format_block_keys,
self._publish_event,
)
# Save our selections to the user state, to ensure consistency:
selected = block_keys['selected']
self.selected = list(selected) # TODO: this doesn't save from the LMS "Progress" page.
# Cache the results
self._selected_set = selected # pylint: disable=attribute-defined-outside-init
return selected
def _get_selected_child_blocks(self):
"""
Generator returning XBlock instances of the children selected for the
current user.
"""
for block_type, block_id in self.selected_children():
child = self.runtime.get_block(self.location.course_key.make_usage_key(block_type, block_id))
if child is None:
logger.info("Child not found for %s %s", str(block_type), str(block_id))
yield child
    def student_view(self, context):
        """
        Render the children selected for the current student (LMS view).
        """
fragment = Fragment()
contents = []
child_context = {} if not context else copy(context)
for child in self._get_selected_child_blocks():
for displayable in child.displayable_items():
rendered_child = displayable.render(STUDENT_VIEW, child_context)
fragment.add_fragment_resources(rendered_child)
contents.append({
'id': text_type(displayable.location),
'content': rendered_child.content,
})
fragment.add_content(self.system.render_template('vert_module.html', {
'items': contents,
'xblock_context': context,
'show_bookmark_button': False,
'watched_completable_blocks': set(),
'completion_delay_ms': None,
}))
return fragment
def validate(self):
"""
Validates the state of this Library Content Module Instance.
"""
return self.descriptor.validate()
def author_view(self, context):
"""
Renders the Studio views.
Normal studio view: If block is properly configured, displays library status summary
Studio container view: displays a preview of all possible children.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_root = root_xblock and root_xblock.location == self.location
if is_root:
# User has clicked the "View" link. Show a preview of all possible children:
if self.children: # pylint: disable=no-member
fragment.add_content(self.system.render_template("library-block-author-preview-header.html", {
'max_count': self.max_count,
'display_name': self.display_name or self.url_name,
}))
context['can_edit_visibility'] = False
context['can_move'] = False
self.render_children(context, fragment, can_reorder=False, can_add=False)
# else: When shown on a unit page, don't show any sort of preview -
# just the status of this block in the validation area.
# The following JS is used to make the "Update now" button work on the unit page and the container view:
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js'))
fragment.initialize_js('LibraryContentAuthorView')
return fragment
def get_child_descriptors(self):
"""
Return only the subset of our children relevant to the current student.
"""
return list(self._get_selected_child_blocks())
@XBlock.wants('user')
@XBlock.wants('library_tools') # Only needed in studio
@XBlock.wants('studio_user_permissions') # Only available in studio
class LibraryContentDescriptor(LibraryContentFields, MakoModuleDescriptor, XmlDescriptor, StudioEditableDescriptor):
"""
Descriptor class for LibraryContentModule XBlock.
"""
resources_dir = 'assets/library_content'
module_class = LibraryContentModule
mako_template = 'widgets/metadata-edit.html'
js = {'js': [resource_string(__name__, 'js/src/vertical/edit.js')]}
js_module_name = "VerticalDescriptor"
show_in_read_only_mode = True
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(LibraryContentDescriptor, self).non_editable_metadata_fields
# The only supported mode is currently 'random'.
# Add the mode field to non_editable_metadata_fields so that it doesn't
# render in the edit form.
non_editable_fields.extend([LibraryContentFields.mode, LibraryContentFields.source_library_version])
return non_editable_fields
@lazy
def tools(self):
"""
Grab the library tools service or raise an error.
"""
return self.runtime.service(self, 'library_tools')
def get_user_id(self):
"""
Get the ID of the current user.
"""
user_service = self.runtime.service(self, 'user')
if user_service:
# May be None when creating bok choy test fixtures
user_id = user_service.get_current_user().opt_attrs.get('edx-platform.user_id', None)
else:
user_id = None
return user_id
@XBlock.handler
def refresh_children(self, request=None, suffix=None): # pylint: disable=unused-argument
"""
Refresh children:
This method is to be used when any of the libraries that this block
references have been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of this block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
        This method will update this block's 'source_library_version' field to
        store the version number of the libraries used, so we can easily
        determine whether this block is up to date or not.
"""
user_perms = self.runtime.service(self, 'studio_user_permissions')
user_id = self.get_user_id()
if not self.tools:
return Response("Library Tools unavailable in current runtime.", status=400)
self.tools.update_children(self, user_id, user_perms)
return Response()
# Copy over any overridden settings the course author may have applied to the blocks.
def _copy_overrides(self, store, user_id, source, dest):
"""
Copy any overrides the user has made on blocks in this library.
"""
for field in six.itervalues(source.fields):
if field.scope == Scope.settings and field.is_set_on(source):
setattr(dest, field.name, field.read_from(source))
if source.has_children:
source_children = [self.runtime.get_block(source_key) for source_key in source.children]
dest_children = [self.runtime.get_block(dest_key) for dest_key in dest.children]
for source_child, dest_child in zip(source_children, dest_children):
self._copy_overrides(store, user_id, source_child, dest_child)
store.update_item(dest, user_id)
def studio_post_duplicate(self, store, source_block):
"""
Used by the studio after basic duplication of a source block. We handle the children
ourselves, because we have to properly reference the library upstream and set the overrides.
Otherwise we'll end up losing data on the next refresh.
"""
# The first task will be to refresh our copy of the library to generate the children.
# We must do this at the currently set version of the library block. Otherwise we may not have
# exactly the same children-- someone may be duplicating an out of date block, after all.
user_id = self.get_user_id()
user_perms = self.runtime.service(self, 'studio_user_permissions')
if not self.tools:
raise RuntimeError("Library tools unavailable, duplication will not be sane!")
self.tools.update_children(self, user_id, user_perms, version=self.source_library_version)
self._copy_overrides(store, user_id, source_block, self)
# Children have been handled.
return True
def _validate_library_version(self, validation, lib_tools, version, library_key):
"""
Validates library version
"""
latest_version = lib_tools.get_library_version(library_key)
if latest_version is not None:
if version is None or version != six.text_type(latest_version):
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u'This component is out of date. The library has new content.'),
# TODO: change this to action_runtime_event='...' once the unit page supports that feature.
# See https://openedx.atlassian.net/browse/TNL-993
action_class='library-update-btn',
# Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes)
action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻")
)
)
return False
else:
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(u'Library is invalid, corrupt, or has been deleted.'),
action_class='edit-button',
action_label=_(u"Edit Library List.")
)
)
return False
return True
def _set_validation_error_if_empty(self, validation, summary):
""" Helper method to only set validation summary if it's empty """
if validation.empty:
validation.set_summary(summary)
def validate(self):
"""
Validates the state of this Library Content Module Instance. This
is the override of the general XBlock method, and it will also ask
its superclass to validate.
"""
validation = super(LibraryContentDescriptor, self).validate()
if not isinstance(validation, StudioValidation):
validation = StudioValidation.copy(validation)
library_tools = self.runtime.service(self, "library_tools")
if not (library_tools and library_tools.can_use_library_content(self)):
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(
u"This course does not support content libraries. "
u"Contact your system administrator for more information."
)
)
)
return validation
if not self.source_library_id:
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.NOT_CONFIGURED,
_(u"A library has not yet been selected."),
action_class='edit-button',
action_label=_(u"Select a Library.")
)
)
return validation
lib_tools = self.runtime.service(self, 'library_tools')
self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key)
# Note: we assume refresh_children() has been called
# since the last time fields like source_library_id or capa_types were changed.
matching_children_count = len(self.children) # pylint: disable=no-member
if matching_children_count == 0:
self._set_validation_error_if_empty(
validation,
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u'There are no matching problem types in the specified libraries.'),
action_class='edit-button',
action_label=_(u"Select another problem type.")
)
)
if matching_children_count < self.max_count:
self._set_validation_error_if_empty(
validation,
StudioValidationMessage(
StudioValidationMessage.WARNING,
(
ngettext(
u'The specified library is configured to fetch {count} problem, ',
u'The specified library is configured to fetch {count} problems, ',
self.max_count
) +
ngettext(
u'but there is only {actual} matching problem.',
u'but there are only {actual} matching problems.',
matching_children_count
)
).format(count=self.max_count, actual=matching_children_count),
action_class='edit-button',
action_label=_(u"Edit the library configuration.")
)
)
return validation
def source_library_values(self):
"""
Return a list of possible values for self.source_library_id
"""
lib_tools = self.runtime.service(self, 'library_tools')
user_perms = self.runtime.service(self, 'studio_user_permissions')
all_libraries = [
(key, name) for key, name in lib_tools.list_available_libraries()
if user_perms.can_read(key) or self.source_library_id == six.text_type(key)
]
all_libraries.sort(key=lambda entry: entry[1]) # Sort by name
if self.source_library_id and self.source_library_key not in [entry[0] for entry in all_libraries]:
all_libraries.append((self.source_library_id, _(u"Invalid Library")))
all_libraries = [(u"", _("No Library Selected"))] + all_libraries
values = [{"display_name": name, "value": six.text_type(key)} for key, name in all_libraries]
return values
def editor_saved(self, user, old_metadata, old_content):
"""
If source_library_id or capa_type has been edited, refresh_children automatically.
"""
old_source_library_id = old_metadata.get('source_library_id', [])
if (old_source_library_id != self.source_library_id or
old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type):
try:
self.refresh_children()
except ValueError:
pass # The validation area will display an error message, no need to do anything now.
def has_dynamic_children(self):
"""
Inform the runtime that our children vary per-user.
See get_child_descriptors() above
"""
return True
def get_content_titles(self):
"""
        Returns a list of friendly titles for our selected children only; without
        this, all possible children's titles would be seen in the sequence bar in
        the LMS.
        This overrides the get_content_titles method included in x_module by default.
"""
titles = []
for child in self._xmodule.get_child_descriptors():
titles.extend(child.get_content_titles())
return titles
@classmethod
def definition_from_xml(cls, xml_object, system):
children = [
system.process_xml(etree.tostring(child)).scope_ids.usage_id
for child in xml_object.getchildren()
]
definition = {
attr_name: json.loads(attr_value)
for attr_name, attr_value in xml_object.attrib.items()
}
return definition, children
def definition_to_xml(self, resource_fs):
""" Exports Library Content Module to XML """
xml_object = etree.Element('library_content')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
# Set node attributes based on our fields.
for field_name, field in six.iteritems(self.fields): # pylint: disable=no-member
if field_name in ('children', 'parent', 'content'):
continue
if field.is_set_on(self):
xml_object.set(field_name, six.text_type(field.read_from(self)))
return xml_object
class LibrarySummary(object):
"""
A library summary object which contains the fields required for library listing on studio.
"""
def __init__(self, library_locator, display_name):
"""
Initialize LibrarySummary
Arguments:
library_locator (LibraryLocator): LibraryLocator object of the library.
display_name (unicode): display name of the library.
"""
self.display_name = display_name if display_name else _(u"Empty")
self.id = library_locator # pylint: disable=invalid-name
self.location = library_locator.make_usage_key('library', 'library')
@property
def display_org_with_default(self):
"""
Org display names are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'org' field from the key.
"""
return self.location.library_key.org
@property
def display_number_with_default(self):
"""
Display numbers are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'library' field from the key.
"""
return self.location.library_key.library
| agpl-3.0 | 5,388,732,644,370,925,000 | 40.452586 | 125 | 0.614121 | false |
thetomcraig/redwood | examples/tcp-ip/serverClean.py | 1 | 7235 | #!/usr/bin/python
from PyQt4 import QtGui
from PyQt4 import QtCore
import time
import sys
import math
import myGui
import functions
import SocketServer
import threading
import socket
import random
import parameters
import datetime
from collections import deque
params=parameters.getParams()
socket.setdefaulttimeout(2)
class Server(QtGui.QWidget):
def __init__(self,server, bind_and_activate=True):
super(Server, self).__init__()
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
HOST=params['serverIP']
PORT=params['port']
self.server=server
self.server.queue=[]
t = threading.Thread(target=self.server.serve_forever)
t.setDaemon(True) # don't hang on exit
t.start()
self.initialize()
def initialize(self):
self.status=0
self.experimentStarted=0
self.playerDict={}
self.makeQueue()
self.assignGroups()
self.makeDataFiles()
self.initUI()
def makeQueue(self):
self.queue={}
self.queues=['unimportant','important','linksToChange','resend']
for Q in self.queues:
self.queue[Q]=deque([])
def assignGroups(self):
self.totalPlayers=params['groups']*params['playersPerGroup']
this=[]
j=1
for group in range(1,params['groups']+1):
for player in range(1,params['playersPerGroup']+1):
#ComputerNumber,GroupNumber,PlayerNumberInGroup
this.append([j,group,player])
j=j+1
random.shuffle(this)
self.groupParameters={}
for group in range(1,params['groups']+1):
self.groupParameters[group]={}
self.groupParameters[group]['timeVectorNumber']=[]
self.groupParameters[group]['timeVectorReveal']=[]
for period in range(1,params['totalPeriods']+1):
self.timeVectorNumber,self.timeVectorReveal=functions.getInvestments(params['p0'],params['mu'])
self.groupParameters[group]['timeVectorNumber'].append(self.timeVectorNumber)
self.groupParameters[group]['timeVectorReveal'].append(self.timeVectorReveal)
self.groupAssignments=this
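		# groupAssignments now holds shuffled [computerNumber, groupNumber,
		# playerNumberInGroup] triples, e.g. [3, 2, 1] for computer 3 acting
		# as player 1 of group 2 (illustrative values only).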
def makeDataFiles(self):
self.dataFile=datetime.datetime.now().strftime("sessions/%Y%m%d-%H%M%S/data.csv")
self.playerFile=self.dataFile.replace("data","players")
self.parameterFile=self.dataFile.replace("data.csv","parameters.py")
myGui.ensure_dir(self.dataFile)
file = open(self.playerFile,'w')
file.writelines("computerNumber,subjectNumber,groupNumber,IP,localStartTime,payoffPoints,payoffDollars\n")
file.close()
file = open(self.dataFile,'a')
file.writelines("group,linkStart,linkEnd,addLink,cost,globalTime\n")
file.close()
filename='parameters.py'
file = open(filename,'r')
fileData=file.read()
file.close()
file = open(self.parameterFile,'w')
file.writelines(fileData)
file.close()
def initUI(self):
self.currentPage="Overview"
self.pageNavigator()
def pageNavigator(self):
if self.currentPage=="Overview":
self.makePageOverview()
def makePageOverview(self):
#Titles
self.statusLabel = QtGui.QLabel('Waiting For People To Register')
self.statusLabel.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)
#Tables
self.statusTable = myGui.Table()
self.statusTable.data=[]
for player in range(self.totalPlayers):
self.statusTable.data.append(["","","","","",""])
self.statusTable.columnWidths=[100,100,100,100,100,100]
self.statusTable.rowHeight=50
self.statusTable.columnHeaders=['Computer','IP Address','Group','GroupID','Status','Other']
self.statusTable.updateTable()
self.grid = QtGui.QGridLayout()
self.button=myGui.ButtonDoubleClick()
self.button.title1="Start Experiment"
self.button.title2="You Sure?!?"
self.button.title3="Started!"
self.grid.addWidget(self.statusLabel,1,1,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.statusTable,2,1,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.button,3,1,QtCore.Qt.AlignCenter)
self.grid.setRowMinimumHeight(2,600)
self.grid.setRowMinimumHeight(3,100)
self.setLayout(self.grid)
self.show()
self.checkStatus()
def queueManager(self):
while len(self.server.queue)>0:
k=self.server.queue.pop()
if k[1]=="getPlayerNumber":
self.queue['important'].append(k)
for Q in self.queues:
while len(self.queue[Q])>0:
thisMessage=self.queue[Q].popleft()
messageIp=thisMessage[0]
messageType=thisMessage[1]
messageValue=thisMessage[2]
if messageType=="getPlayerNumber":
self.getPlayerNumber(messageIp)
elif messageType=="periodSummary":
self.manageData(messageIp,messageValue)
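	# Each queued message is a (messageIp, messageType, messageValue) triple;
	# for example ('192.168.1.12', 'getPlayerNumber', '') would be routed to
	# getPlayerNumber() above (IP shown for illustration only).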
def manageData(self,messageIp,messageValue):
		periodSummary = eval(messageValue)
#subjectID,group,groupID,period,
def getPlayerNumber(self,messageIp):
print "getting new number"
if messageIp not in self.playerDict:
this=self.groupAssignments.pop()
self.playerDict[messageIp]={}
self.playerDict[messageIp]['computerID']=this[0]
self.playerDict[messageIp]['group']=this[1]
self.playerDict[messageIp]['groupID']=this[2]
m=[messageIp,params['port'],"assignPlayerNumber",self.playerDict[messageIp]['computerID']]
this=myGui.sendMessage(m[0],m[1],m[2],m[3])
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][0]=str(self.playerDict[messageIp]['computerID'])
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][1]=messageIp
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][2]=str(self.playerDict[messageIp]['group'])
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][3]=str(self.playerDict[messageIp]['groupID'])
self.statusTable.updateTable()
def checkStatus(self):
#print "check status %s"%(time.time())
self.queueManager()
if self.button.stage==3 and self.experimentStarted==0:
#Experiment has started:
self.experimentStarted=1
self.period=0
self.periodStage=0
elif self.experimentStarted==1:
if self.periodStage==0:
#Start Period
for ip in self.playerDict:
group=self.playerDict[ip]['group']
timesString="["
numbersString="["
for j,k in zip(self.groupParameters[group]['timeVectorNumber'][self.period-1],self.groupParameters[group]['timeVectorReveal'][self.period-1]):
timesString=timesString+"%.02f,"%(k)
numbersString=numbersString+"%s,"%(j)
timesString=timesString[:-1]+"]"
numbersString=numbersString[:-1]+"]"
m=[ip,params['port'],"periodStarted",[self.period,timesString,numbersString]]
this=myGui.sendMessage(m[0],m[1],m[2],m[3])
self.periodStage=1
elif self.periodStage==1:
#Wait for all responses
#m=[ip,params['port'],"periodFinished",["SUMMARY STATS HERE"]]
print "wainting"
elif self.periodStage==2:
#Finish period
m=[ip,params['port'],"periodFinished",["SUMMARY STATS HERE"]]
QtCore.QTimer.singleShot(10,self.checkStatus)
def main():
HOST, PORT = "", 9989
# Create the server, binding to localhost on port 9999
server = SocketServer.TCPServer((HOST, PORT),myGui.MyTCPHandler, bind_and_activate=True)
app = QtGui.QApplication(sys.argv)
window = Server(server)
###
server.allow_reuse_address = True
window.allow_reuse_address = True
###
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| isc | -6,759,639,312,788,060,000 | 29.787234 | 147 | 0.719281 | false |
cjgrady/stinkbait | reports/providerAggregate.py | 1 | 3807 | """
@summary: Creates a report with comparative statistics across providers
@author: CJ Grady
@version: 1.0
@status: alpha
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
from modules.providers import PROVIDERS
from results.resultsRetrieverNumpy import getNumberOfRareTaxaByPoints, \
getNumberOfRareTaxaByProviders, getNumberOfTaxaRepresented, \
getNumberOfUniqueTaxa, providerOfRareTaxa, providerRankedInTopX, \
subsetResults
outFn = "/home/cjgrady/workspace/occDataMiningPOC/data/reports/fish/providers.html"
if __name__ == "__main__":
numUnique = []
numRareSpecies = []
numRareProv = []
numTaxa = []
results = subsetResults()
for k in PROVIDERS:
print PROVIDERS[k]
nUnique = getNumberOfUniqueTaxa(results, providerId=k)
nRareSp = getNumberOfRareTaxaByPoints(results, providerId=k)
nRareP = getNumberOfRareTaxaByProviders(results, providerId=k)
numTax = getNumberOfTaxaRepresented(results, providerId=k)
numUnique.append((nUnique, k))
numRareSpecies.append((nRareSp, k))
numRareProv.append((nRareP, k))
numTaxa.append((numTax, k))
numUnique.sort(reverse=True)
numRareSpecies.sort(reverse=True)
numRareProv.sort(reverse=True)
numTaxa.sort(reverse=True)
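   # Each list now holds (count, providerKey) tuples sorted in descending
   # order of count, so the sections written below are per-metric rankings.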
with open(outFn, 'w') as outF:
outF.write('<html>\n')
outF.write(' <head>\n')
outF.write(' <title>Providers report</title>\n')
outF.write(' </head>\n')
outF.write(' <body>\n')
# Unique
outF.write(' <h1>Most unique taxa</h1>\n')
outF.write(' <ol>\n')
for n, k in numUnique:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
# Rare by species
outF.write(' <h1>Most rare species (<= 10 points)</h1>\n')
outF.write(' <ol>\n')
for n, k in numRareSpecies:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
# Rare by provider
outF.write(' <h1>Most rare species (<= 5 providers)</h1>\n')
outF.write(' <ol>\n')
for n, k in numRareProv:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
# Number of taxa
outF.write(' <h1>Number of species</h1>\n')
outF.write(' <ol>\n')
for n, k in numTaxa:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
outF.write(' </body>\n')
outF.write('</html>\n')
| gpl-2.0 | -6,075,054,366,014,210,000 | 36.323529 | 83 | 0.589441 | false |
dfreedman55/LearningPython | week4/exercise3.py | 1 | 2047 | #!/usr/bin/env python
def main():
parsestring(uptime1)
sumsec(stats)
parsestring(uptime2)
sumsec(stats)
parsestring(uptime3)
sumsec(stats)
parsestring(uptime4)
sumsec(stats)
def yrs2sec(numyrs):
seconds = numyrs * 12 * 4 * 7 * 24 * 60 * 60
stats['years'] = seconds
def mth2sec(nummth):
seconds = nummth * 4 * 7 * 24 * 60 * 60
stats['months'] = seconds
def wks2sec(numwks):
seconds = numwks * 7 * 24 * 60 * 60
stats['weeks'] = seconds
def dys2sec(numdys):
seconds = numdys * 24 * 60 * 60
stats['days'] = seconds
def hrs2sec(numhrs):
seconds = numhrs * 60 * 60
stats['hours'] = seconds
def min2sec(nummin):
seconds = nummin * 60
stats['minutes'] = seconds
def sumsec(stats):
total = int(0)
for k, v in stats.items():
if type(v) != type('string'):
total = total + v
print stats
print '\n'
print 'Total Seconds for %s is: %s' % (stats['devicename'], total)
print '\n'
def parsestring(uptimestr):
	# Reset the values parsed for the previous device so that stale fields
	# don't inflate the next device's total in sumsec().
	for field in ('years', 'months', 'weeks', 'days', 'hours', 'minutes'):
		stats[field] = ''
	stats['devicename'] = uptimestr.split(' ')[0]
if 'year' in uptimestr:
numyrs = int(uptimestr.split('year')[0].strip().split(' ')[-1])
yrs2sec(numyrs)
if 'month' in uptimestr:
nummth = int(uptimestr.split('month')[0].strip().split(' ')[-1])
mth2sec(nummth)
if 'week' in uptimestr:
numwks = int(uptimestr.split('week')[0].strip().split(' ')[-1])
wks2sec(numwks)
if 'day' in uptimestr:
numdys = int(uptimestr.split('day')[0].strip().split(' ')[-1])
dys2sec(numdys)
if 'hour' in uptimestr:
numhrs = int(uptimestr.split('hour')[0].strip().split(' ')[-1])
hrs2sec(numhrs)
if 'minute' in uptimestr:
nummin = int(uptimestr.split('minute')[0].strip().split(' ')[-1])
min2sec(nummin)
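# For example, parsestring('3750RJ uptime is 1 hour, 29 minutes') sets
# stats['devicename'] = '3750RJ', stats['hours'] = 3600 and
# stats['minutes'] = 1740, so sumsec(stats) reports 5340 total seconds.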
if __name__ == '__main__':
uptime1 = 'twb-sf-881 uptime is 6 weeks, 4 days, 2 hours, 25 minutes'
uptime2 = '3750RJ uptime is 1 hour, 29 minutes'
uptime3 = 'CATS3560 uptime is 8 weeks, 4 days, 18 hours, 16 minutes'
uptime4 = 'rtr1 uptime is 5 years, 18 weeks, 8 hours, 23 minutes'
stats = {'devicename': '', 'years': '', 'months': '', 'weeks': '', 'days': '', 'hours': '', 'minutes': ''}
main()
| gpl-2.0 | 8,404,460,959,718,783,000 | 23.662651 | 107 | 0.635076 | false |
sgordon007/jcvi_062915 | assembly/preprocess.py | 1 | 22843 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Wrapper to trim and correct sequence data.
"""
import os
import os.path as op
import sys
import logging
from jcvi.formats.base import BaseFile, write_file, must_open
from jcvi.formats.fastq import guessoffset
from jcvi.utils.cbook import depends, human_size
from jcvi.apps.base import OptionParser, ActionDispatcher, download, \
sh, mkdir, need_update, datadir
class FastQCdata (BaseFile, dict):
def __init__(self, filename, human=False):
super(FastQCdata, self).__init__(filename)
if not op.exists(filename):
logging.debug("File `{0}` not found.".format(filename))
# Sample_RF37-1/RF37-1_GATCAG_L008_R2_fastqc =>
# RF37-1_GATCAG_L008_R2
self["Filename"] = op.basename(\
op.split(filename)[0]).rsplit("_", 1)[0]
self["Total Sequences"] = self["Sequence length"] = \
self["Total Bases"] = "na"
return
fp = open(filename)
for row in fp:
atoms = row.rstrip().split("\t")
if atoms[0] in ("#", ">"):
continue
if len(atoms) != 2:
continue
a, b = atoms
self[a] = b
ts = self["Total Sequences"]
sl = self["Sequence length"]
if "-" in sl:
a, b = sl.split("-")
sl = (int(a) + int(b)) / 2
if a == "30":
sl = int(b)
ts, sl = int(ts), int(sl)
tb = ts * sl
self["Total Sequences"] = human_size(ts).rstrip("b") if human else ts
self["Total Bases"] = human_size(tb).rstrip("b") if human else tb
def main():
actions = (
('count', 'count reads based on FASTQC results'),
('trim', 'trim reads using TRIMMOMATIC'),
('correct', 'correct reads using ALLPATHS-LG'),
('hetsmooth', 'reduce K-mer diversity using het-smooth'),
('alignextend', 'increase read length by extending based on alignments'),
('contamination', 'check reads contamination against Ecoli'),
('diginorm', 'run K-mer based normalization'),
('expand', 'expand sequences using short reads'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def diginorm(args):
"""
%prog diginorm fastqfile
Run K-mer based normalization. Based on tutorial:
<http://ged.msu.edu/angus/diginorm-2012/tutorial.html>
Assume input is either an interleaved pairs file, or two separate files.
To set up khmer:
$ git clone git://github.com/ged-lab/screed.git
$ git clone git://github.com/ged-lab/khmer.git
$ cd screed
$ python setup.py install
$ cd ../khmer
$ make test
$ export PYTHONPATH=~/export/khmer
"""
from jcvi.formats.fastq import shuffle, pairinplace, split
from jcvi.apps.base import getfilesize
p = OptionParser(diginorm.__doc__)
p.add_option("--single", default=False, action="store_true",
help="Single end reads")
p.add_option("--tablesize", help="Memory size")
p.add_option("--npass", default="1", choices=("1", "2"),
help="How many passes of normalization")
p.set_depth(depth=50)
p.set_home("khmer", default="/usr/local/bin/")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 2:
fastq = shuffle(args + ["--tag"])
else:
fastq, = args
kh = opts.khmer_home
depth = opts.depth
PE = not opts.single
sys.path.insert(0, op.join(kh, "python"))
pf = fastq.rsplit(".", 1)[0]
keepfile = fastq + ".keep"
hashfile = pf + ".kh"
mints = 10000000
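    # Default hash table size: roughly 1/16 of the input file size, rounded
    # up to the nearest multiple of ten million (mints).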
ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints)
norm_cmd = op.join(kh, "normalize-by-median.py")
filt_cmd = op.join(kh, "filter-abund.py")
if need_update(fastq, (hashfile, keepfile)):
cmd = norm_cmd
cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts)
if PE:
cmd += " -p"
cmd += " -s {0} {1}".format(hashfile, fastq)
sh(cmd)
abundfiltfile = keepfile + ".abundfilt"
if need_update((hashfile, keepfile), abundfiltfile):
cmd = filt_cmd
cmd += " {0} {1}".format(hashfile, keepfile)
sh(cmd)
if opts.npass == "1":
seckeepfile = abundfiltfile
else:
seckeepfile = abundfiltfile + ".keep"
if need_update(abundfiltfile, seckeepfile):
cmd = norm_cmd
cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2)
cmd += " {0}".format(abundfiltfile)
sh(cmd)
if PE:
pairsfile = pairinplace([seckeepfile,
"--base={0}".format(pf + "_norm"), "--rclip=2"])
split([pairsfile])
def expand(args):
"""
%prog expand bes.fasta reads.fastq
Expand sequences using short reads. Useful, for example for getting BAC-end
sequences. The template to use, in `bes.fasta` may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
"""
import math
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import readlen, first, fasta
from jcvi.formats.blast import Blast
from jcvi.formats.base import FileShredder
from jcvi.apps.bowtie import align, get_samfile
from jcvi.apps.align import blast
p = OptionParser(expand.__doc__)
p.set_depth(depth=200)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bes, reads = args
size = Fasta(bes).totalsize
rl = readlen([reads])
expected_size = size + 2 * rl
nreads = expected_size * opts.depth / rl
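    # Round the read budget up to the nearest thousand reads.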
nreads = int(math.ceil(nreads / 1000.)) * 1000
# Attract reads
samfile, logfile = align([bes, reads, "--reorder", "--mapped",
"--firstN={0}".format(opts.firstN)])
samfile, mapped, _ = get_samfile(reads, bes, bowtie=True, mapped=True)
logging.debug("Extract first {0} reads from `{1}`.".format(nreads, mapped))
pf = mapped.split(".")[0]
pf = pf.split("-")[0]
bespf = bes.split(".")[0]
reads = pf + ".expand.fastq"
first([str(nreads), mapped, "-o", reads])
# Perform mini-assembly
fastafile = reads.rsplit(".", 1)[0] + ".fasta"
qualfile = ""
if need_update(reads, fastafile):
fastafile, qualfile = fasta([reads])
contigs = op.join(pf, "454LargeContigs.fna")
if need_update(fastafile, contigs):
cmd = "runAssembly -o {0} -cpu 8 {1}".format(pf, fastafile)
sh(cmd)
assert op.exists(contigs)
# Annotate contigs
blastfile = blast([bes, contigs])
mapping = {}
for query, b in Blast(blastfile).iter_best_hit():
mapping[query] = b
f = Fasta(contigs, lazy=True)
annotatedfasta = ".".join((pf, bespf, "fasta"))
fw = open(annotatedfasta, "w")
keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list
recs = []
for key, v in f.iteritems_ordered():
vid = v.id
if vid not in mapping:
continue
b = mapping[vid]
subject = b.subject
rec = v.reverse_complement() if b.orientation == '-' else v
rec.id = rid = "_".join((pf, vid, subject))
rec.description = ""
recs.append((keys.index(subject), rid, rec))
recs = [x[-1] for x in sorted(recs)]
SeqIO.write(recs, fw, "fasta")
fw.close()
FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf])
logging.debug("Annotated seqs (n={0}) written to `{1}`.".\
format(len(recs), annotatedfasta))
return annotatedfasta
def contamination(args):
"""
%prog contamination Ecoli.fasta genome.fasta read.fastq
Check read contamination on a folder of paired reads. Use bowtie2 to compare
the reads against:
1. Ecoli.fsata - this will tell us the lower bound of contamination
2. genome.fasta - this will tell us the upper bound of contamination
"""
from jcvi.apps.bowtie import BowtieLogFile, align
p = OptionParser(contamination.__doc__)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
ecoli, genome, fq = args
firstN_opt = "--firstN={0}".format(opts.firstN)
samfile, logfile = align([ecoli, fq, firstN_opt])
bl = BowtieLogFile(logfile)
lowerbound = bl.rate
samfile, logfile = align([genome, fq, firstN_opt])
bl = BowtieLogFile(logfile)
upperbound = 100 - bl.rate
median = (lowerbound + upperbound) / 2
clogfile = fq + ".Ecoli"
fw = open(clogfile, "w")
lowerbound = "{0:.1f}".format(lowerbound)
upperbound = "{0:.1f}".format(upperbound)
median = "{0:.1f}".format(median)
print >> fw, "\t".join((fq, lowerbound, median, upperbound))
print >> sys.stderr, "{0}: Ecoli contamination rate {1}-{2}".\
format(fq, lowerbound, upperbound)
fw.close()
def alignextend(args):
"""
%prog alignextend ref.fasta read.1.fastq read.2.fastq
Wrapper around AMOS alignextend.
"""
choices = "prepare,align,filter,rmdup,genreads".split(",")
p = OptionParser(alignextend.__doc__)
p.add_option("--nosuffix", default=False, action="store_true",
help="Do not add /1/2 suffix to the read [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement the reads before alignment")
p.add_option("--len", default=100, type="int",
help="Extend to this length")
p.add_option("--stage", default="prepare", choices=choices,
help="Start from certain stage")
p.add_option("--dup", default=10, type="int",
help="Filter duplicates with coordinates within this distance")
p.add_option("--maxdiff", default=1, type="int",
help="Maximum number of differences")
p.set_home("amos")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
ref, r1, r2 = args
pf = op.basename(r1).split(".")[0]
cmd = op.join(opts.amos_home, "src/Experimental/alignextend.pl")
if not opts.nosuffix:
cmd += " -suffix"
bwa_idx = "{0}.ref.fa.sa".format(pf)
if not need_update(ref, bwa_idx):
cmd += " -noindex"
cmd += " -threads {0}".format(opts.cpus)
offset = guessoffset([r1])
if offset == 64:
cmd += " -I"
if opts.rc:
cmd += " -rc"
cmd += " -allow -len {0} -dup {1}".format(opts.len, opts.dup)
cmd += " -min {0} -max {1}".format(2 * opts.len, 20 * opts.len)
cmd += " -maxdiff {0}".format(opts.maxdiff)
cmd += " -stage {0}".format(opts.stage)
cmd += " ".join(("", pf, ref, r1, r2))
sh(cmd)
def count(args):
"""
%prog count *.gz
Count reads based on FASTQC results. FASTQC needs to be run on all the input
data given before running this command.
"""
from jcvi.utils.table import loadtable, write_csv
p = OptionParser(count.__doc__)
p.add_option("--dir",
help="Sub-directory where FASTQC was run [default: %default]")
p.add_option("--human", default=False, action="store_true",
help="Human friendly numbers [default: %default]")
p.set_table()
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
filenames = args
subdir = opts.dir
header = "Filename|Total Sequences|Sequence length|Total Bases".split("|")
rows = []
human = opts.human
for f in filenames:
folder = f.replace(".gz", "").rsplit(".", 1)[0] + "_fastqc"
if subdir:
folder = op.join(subdir, folder)
summaryfile = op.join(folder, "fastqc_data.txt")
fqcdata = FastQCdata(summaryfile, human=human)
row = [fqcdata[x] for x in header]
rows.append(row)
print >> sys.stderr, loadtable(header, rows)
write_csv(header, rows, sep=opts.sep,
filename=opts.outfile, align=opts.align)
def hetsmooth(args):
"""
%prog hetsmooth reads_1.fq reads_2.fq jf-23_0
Wrapper against het-smooth. Below is the command used in het-smooth manual.
$ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220
--no-multibase-replacements --jellyfish-hash-file=23-mers.jf
reads_1.fq reads_2.fq
"""
p = OptionParser(hetsmooth.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.add_option("-L", type="int",
help="Bottom threshold, first min [default: %default]")
p.add_option("-U", type="int",
help="Top threshold, second min [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
reads1fq, reads2fq, jfdb = args
K = opts.K
L = opts.L
U = opts.U
assert L is not None and U is not None, "Please specify -L and -U"
cmd = "het-smooth --kmer-len={0}".format(K)
cmd += " --bottom-threshold={0} --top-threshold={1}".format(L, U)
cmd += " --no-multibase-replacements --jellyfish-hash-file={0}".format(jfdb)
cmd += " --no-reads-log"
cmd += " " + " ".join((reads1fq, reads2fq))
sh(cmd)
def trim(args):
"""
%prog trim fastqfiles
Trim reads using TRIMMOMATIC. If two fastqfiles are given, then it invokes
the paired reads mode. See manual:
<http://www.usadellab.org/cms/index.php?page=trimmomatic>
"""
tv = "0.32"
TrimJar = "trimmomatic-{0}.jar".format(tv)
phdchoices = ("33", "64")
p = OptionParser(trim.__doc__)
p.add_option("--path", default=op.join("~/bin", TrimJar),
help="Path to trimmomatic jar file [default: %default]")
p.add_option("--phred", default=None, choices=phdchoices,
help="Phred score offset [default: guess]")
p.add_option("--nofrags", default=False, action="store_true",
help="Discard frags file in PE mode [default: %default]")
p.add_option("--minqv", default=15, type="int",
help="Average qv after trimming [default: %default]")
p.add_option("--minlen", default=36, type="int",
help="Minimum length after trimming [default: %default]")
p.add_option("--adapteronly", default=False, action="store_true",
help="Only trim adapters with no qv trimming [default: %default]")
p.add_option("--nogz", default=False, action="store_true",
help="Do not write to gzipped files [default: %default]")
p.add_option("--log", default=None, dest="trimlog",
help="Specify a `trimlog` file [default: %default]")
p.set_cpus(cpus=4)
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
path = op.expanduser(opts.path)
url = \
"http://www.usadellab.org/cms/uploads/supplementary/Trimmomatic/Trimmomatic-{0}.zip"\
.format(tv)
if not op.exists(path):
path = download(url)
TrimUnzipped = "Trimmomatic-" + tv
if not op.exists(TrimUnzipped):
sh("unzip " + path)
os.remove(path)
path = op.join(TrimUnzipped, TrimJar)
assert op.exists(path), \
"Couldn't find Trimmomatic jar file at `{0}`".\
format(path)
adaptersfile = "adapters.fasta"
Adapters = must_open(op.join(datadir, adaptersfile)).read()
write_file(adaptersfile, Adapters, skipcheck=True)
assert op.exists(adaptersfile), \
"Please place the illumina adapter sequence in `{0}`".\
format(adaptersfile)
if opts.phred is None:
offset = guessoffset([args[0]])
else:
offset = int(opts.phred)
phredflag = " -phred{0}".format(offset)
threadsflag = " -threads {0}".format(opts.cpus)
if opts.trimlog:
trimlog = " -trimlog {0}".format(opts.trimlog)
cmd = "java -Xmx4g -jar {0}".format(path)
frags = ".frags.fastq"
pairs = ".pairs.fastq"
if not opts.nogz:
frags += ".gz"
pairs += ".gz"
get_prefix = lambda x: op.basename(x).replace(".gz", "").rsplit(".", 1)[0]
if len(args) == 1:
cmd += " SE"
cmd += phredflag
cmd += threadsflag
if opts.trimlog:
cmd += trimlog
fastqfile, = args
prefix = get_prefix(fastqfile)
frags1 = prefix + frags
cmd += " {0}".format(" ".join((fastqfile, frags1)))
else:
cmd += " PE"
cmd += phredflag
cmd += threadsflag
if opts.trimlog:
cmd += trimlog
fastqfile1, fastqfile2 = args
prefix1 = get_prefix(fastqfile1)
prefix2 = get_prefix(fastqfile2)
pairs1 = prefix1 + pairs
pairs2 = prefix2 + pairs
frags1 = prefix1 + frags
frags2 = prefix2 + frags
if opts.nofrags:
frags1 = "/dev/null"
frags2 = "/dev/null"
cmd += " {0}".format(" ".join((fastqfile1, fastqfile2, \
pairs1, frags1, pairs2, frags2)))
cmd += " ILLUMINACLIP:{0}:2:30:10".format(adaptersfile)
if not opts.adapteronly:
cmd += " LEADING:3 TRAILING:3"
cmd += " SLIDINGWINDOW:4:{0}".format(opts.minqv)
cmd += " MINLEN:{0}".format(opts.minlen)
if offset != 33:
cmd += " TOPHRED33"
sh(cmd)
@depends
def run_RemoveDodgyReads(infile=None, outfile=None, workdir=None,
removeDuplicates=True, rc=False, nthreads=32):
# orig.fastb => filt.fastb
assert op.exists(infile)
orig = infile.rsplit(".", 1)[0]
filt = outfile.rsplit(".", 1)[0]
cmd = "RemoveDodgyReads IN_HEAD={0} OUT_HEAD={1}".format(orig, filt)
if not removeDuplicates:
cmd += " REMOVE_DUPLICATES=False"
if rc:
cmd += " RC=True"
    if isinstance(nthreads, int):
        # Callers normally pass a preformatted " NUM_THREADS=N" string;
        # accept a bare integer as well.
        nthreads = " NUM_THREADS={0}".format(nthreads)
    cmd += nthreads
sh(cmd)
@depends
def run_FastbAndQualb2Fastq(infile=None, outfile=None, rc=False):
corr = op.basename(infile).rsplit(".", 1)[0]
cmd = "FastbQualbToFastq HEAD_IN={0} HEAD_OUT={0}".format(corr)
cmd += " PAIRED=False PHRED_OFFSET=33"
if rc:
cmd += " FLIP=True"
sh(cmd)
@depends
def run_pairs(infile=None, outfile=None):
from jcvi.assembly.allpaths import pairs
pairs(infile)
def correct(args):
"""
%prog correct *.fastq
Correct the fastqfile and generated corrected fastqfiles. This calls
assembly.allpaths.prepare() to generate input files for ALLPATHS-LG. The
naming convention for your fastqfiles are important, and are listed below.
By default, this will correct all PE reads, and remove duplicates of all MP
reads, and results will be placed in `frag_reads.corr.{pairs,frags}.fastq`
and `jump_reads.corr.{pairs,frags}.fastq`.
"""
from jcvi.assembly.allpaths import prepare
from jcvi.assembly.base import FastqNamings
p = OptionParser(correct.__doc__ + FastqNamings)
p.add_option("--dir", default="data",
help="Working directory [default: %default]")
p.add_option("--fragsdedup", default=False, action="store_true",
help="Don't deduplicate the fragment reads [default: %default]")
p.add_option("--ploidy", default="2", choices=("1", "2"),
help="Ploidy [default: %default]")
p.add_option("--haploidify", default=False, action="store_true",
help="Set HAPLOIDIFY=True [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastq = args
tag, tagj = "frag_reads", "jump_reads"
ploidy = opts.ploidy
haploidify = opts.haploidify
assert (not haploidify) or (haploidify and ploidy == '2')
prepare(["Unknown"] + fastq + ["--norun"])
datadir = opts.dir
mkdir(datadir)
fullpath = op.join(os.getcwd(), datadir)
nthreads = " NUM_THREADS={0}".format(opts.cpus)
phred64 = (guessoffset([args[0]]) == 64)
orig = datadir + "/{0}_orig".format(tag)
origfastb = orig + ".fastb"
if need_update(fastq, origfastb):
cmd = "PrepareAllPathsInputs.pl DATA_DIR={0} HOSTS='{1}' PLOIDY={2}".\
format(fullpath, opts.cpus, ploidy)
if phred64:
cmd += " PHRED_64=True"
sh(cmd)
if op.exists(origfastb):
correct_frag(datadir, tag, origfastb, nthreads, dedup=opts.fragsdedup,
haploidify=haploidify)
origj = datadir + "/{0}_orig".format(tagj)
origjfastb = origj + ".fastb"
if op.exists(origjfastb):
correct_jump(datadir, tagj, origjfastb, nthreads)
def export_fastq(datadir, corrfastb, rc=False):
pf = op.basename(corrfastb.rsplit(".", 1)[0])
cwd = os.getcwd()
os.chdir(datadir)
corrfastq = pf + ".fastq"
run_FastbAndQualb2Fastq(infile=op.basename(corrfastb), \
outfile=corrfastq, rc=rc)
os.chdir(cwd)
pairsfile = pf + ".pairs"
fragsfastq = pf + ".corr.fastq"
run_pairs(infile=[op.join(datadir, pairsfile), op.join(datadir, corrfastq)],
outfile=fragsfastq)
def correct_frag(datadir, tag, origfastb, nthreads,
dedup=False, haploidify=False):
filt = datadir + "/{0}_filt".format(tag)
filtfastb = filt + ".fastb"
run_RemoveDodgyReads(infile=origfastb, outfile=filtfastb,
removeDuplicates=dedup, rc=False, nthreads=nthreads)
filtpairs = filt + ".pairs"
edit = datadir + "/{0}_edit".format(tag)
editpairs = edit + ".pairs"
if need_update(filtpairs, editpairs):
cmd = "ln -sf {0} {1}.pairs".format(op.basename(filtpairs), edit)
sh(cmd)
editfastb = edit + ".fastb"
if need_update(filtfastb, editfastb):
cmd = "FindErrors HEAD_IN={0} HEAD_OUT={1}".format(filt, edit)
cmd += " PLOIDY_FILE=data/ploidy"
cmd += nthreads
sh(cmd)
corr = datadir + "/{0}_corr".format(tag)
corrfastb = corr + ".fastb"
if need_update(editfastb, corrfastb):
cmd = "CleanCorrectedReads DELETE=True"
cmd += " HEAD_IN={0} HEAD_OUT={1}".format(edit, corr)
cmd += " PLOIDY_FILE={0}/ploidy".format(datadir)
if haploidify:
cmd += " HAPLOIDIFY=True"
cmd += nthreads
sh(cmd)
export_fastq(datadir, corrfastb)
def correct_jump(datadir, tagj, origjfastb, nthreads):
# Pipeline for jump reads does not involve correction
filt = datadir + "/{0}_filt".format(tagj)
filtfastb = filt + ".fastb"
run_RemoveDodgyReads(infile=origjfastb, outfile=filtfastb, \
removeDuplicates=True, rc=True, nthreads=nthreads)
export_fastq(datadir, filtfastb, rc=True)
if __name__ == '__main__':
main()
| bsd-2-clause | 9,061,038,318,958,718,000 | 31.773314 | 89 | 0.590246 | false |
kmike/morphine | morphine/pos_model.py | 1 | 1683 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from morphine import features
from morphine.feature_extractor import FeatureExtractor
from morphine.basetagger import PartialTagger
from pymorphy2.tagset import OpencorporaTag
class POSFeatureExtractor(FeatureExtractor):
IGNORE = {
'Arch', 'intg', 'real', '1per', '2per', '3per', 'GNdr', 'Ms-f',
'anim', 'inan',
'masc', 'femn', 'neut',
'Geox', 'Name',
} | OpencorporaTag.CASES | OpencorporaTag.NUMBERS | OpencorporaTag.MOODS \
| OpencorporaTag.INVOLVEMENT
def __init__(self):
super(POSFeatureExtractor, self).__init__(
token_features=[
features.bias,
features.token_lower,
features.suffix2,
features.suffix3,
features.Grammeme(threshold=0.01, add_unambig=False, ignore=self.IGNORE),
features.GrammemePair(threshold=0.01**2, add_unambig=False, ignore=self.IGNORE),
],
global_features=[
features.sentence_start,
features.sentence_end,
# features.the_only_verb,
features.Pattern([-1, 'token_lower']),
# features.Pattern([+1, 'token_lower']),
features.Pattern([-1, 'Grammeme']),
features.Pattern([+1, 'Grammeme']),
features.Pattern([-1, 'GrammemePair']),
features.Pattern([+1, 'GrammemePair']),
# features.Pattern([-1, 'GrammemePair'], [0, 'GrammemePair']),
],
)
class Tagger(PartialTagger):
def outval(self, tag):
return tag._POS
| mit | -2,966,662,456,207,913,000 | 32 | 96 | 0.562092 | false |
PhloxAR/phloxar | PhloxAR/compat.py | 1 | 1393 | # -*- coding: utf-8 -*-
"""
Compatibility module for Python 2.7 and > 3.3
"""
from __future__ import unicode_literals
import sys
import time
try:
import queue
except ImportError:
import Queue as queue
PY2 = sys.version < '3'
clock = None
if PY2:
unichr = unichr
long = long
fileopen = file
else:
unichr = chr
long = int
fileopen = open
if PY2:
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
else:
iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
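# Example usage, identical on both interpreters (illustrative):
#   for key, value in iteritems({'a': 1}):
#       print(key, value)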
if PY2:
if sys.platform in ('win32', 'cygwin'):
clock = time.clock
else:
clock = time.time
else:
clock = time.perf_counter
if PY2:
from urllib2 import urlopen, build_opener
from urllib2 import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm
else:
    from urllib.request import urlopen
from urllib.request import build_opener, HTTPBasicAuthHandler
from urllib.request import HTTPPasswordMgrWithDefaultRealm
if PY2:
from UserDict import UserDict
from cStringIO import StringIO
import SocketServer as socketserver
import SimpleHTTPServer
else:
    from collections import UserDict
    from collections.abc import MutableMapping
    import http.server as SimpleHTTPServer
    from io import StringIO
    import socketserver
| apache-2.0 | -4,027,996,947,203,010,000 | 20.765625 | 77 | 0.694903 | false |
ganeti/ganeti | test/py/ganeti.server.rapi_unittest.py | 1 | 9885 | #!/usr/bin/python3
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.server.rapi"""
import re
import unittest
import random
from io import StringIO
from ganeti import constants
from ganeti import utils
from ganeti import compat
from ganeti import errors
from ganeti import serializer
from ganeti import rapi
from ganeti import http
from ganeti import objects
import ganeti.rapi.baserlib
import ganeti.rapi.testutils
import ganeti.rapi.rlib2
import ganeti.http.auth
import testutils
class TestRemoteApiHandler(unittest.TestCase):
@staticmethod
def _LookupWrongUser(_):
return None
def _Test(self, method, path, headers, reqbody,
user_fn=NotImplemented, luxi_client=NotImplemented,
reqauth=False):
rm = rapi.testutils._RapiMock(user_fn, luxi_client, reqauth=reqauth)
(resp_code, resp_headers, resp_body) = \
rm.FetchResponse(path, method, http.ParseHeaders(StringIO(headers)),
reqbody)
self.assertTrue(resp_headers[http.HTTP_DATE])
self.assertEqual(resp_headers[http.HTTP_CONNECTION], "close")
self.assertEqual(resp_headers[http.HTTP_CONTENT_TYPE], http.HTTP_APP_JSON)
self.assertEqual(resp_headers[http.HTTP_SERVER], http.HTTP_GANETI_VERSION)
return (resp_code, resp_headers, serializer.LoadJson(resp_body))
def testRoot(self):
(code, _, data) = self._Test(http.HTTP_GET, "/", "", None)
self.assertEqual(code, http.HTTP_OK)
self.assertTrue(data is None)
def testRootReqAuth(self):
(code, _, _) = self._Test(http.HTTP_GET, "/", "", None, reqauth=True)
self.assertEqual(code, http.HttpUnauthorized.code)
def testVersion(self):
(code, _, data) = self._Test(http.HTTP_GET, "/version", "", None)
self.assertEqual(code, http.HTTP_OK)
self.assertEqual(data, constants.RAPI_VERSION)
def testSlashTwo(self):
(code, _, data) = self._Test(http.HTTP_GET, "/2", "", None)
self.assertEqual(code, http.HTTP_OK)
self.assertTrue(data is None)
def testFeatures(self):
(code, _, data) = self._Test(http.HTTP_GET, "/2/features", "", None)
self.assertEqual(code, http.HTTP_OK)
self.assertEqual(set(data), set(rapi.rlib2.ALL_FEATURES))
def testPutInstances(self):
(code, _, data) = self._Test(http.HTTP_PUT, "/2/instances", "", None)
self.assertEqual(code, http.HttpNotImplemented.code)
self.assertTrue(data["message"].startswith("Method PUT is unsupported"))
def testPostInstancesNoAuth(self):
(code, _, _) = self._Test(http.HTTP_POST, "/2/instances", "", None)
self.assertEqual(code, http.HttpUnauthorized.code)
def testRequestWithUnsupportedMediaType(self):
for fn in [lambda s: s, lambda s: s.upper(), lambda s: s.title()]:
headers = rapi.testutils._FormatHeaders([
"%s: %s" % (http.HTTP_CONTENT_TYPE, fn("un/supported/media/type")),
])
(code, _, data) = self._Test(http.HTTP_GET, "/", headers, "body")
self.assertEqual(code, http.HttpUnsupportedMediaType.code)
self.assertEqual(data["message"], "Unsupported Media Type")
def testRequestWithInvalidJsonData(self):
body = "_this/is/no'valid.json"
self.assertRaises(Exception, serializer.LoadJson, body)
headers = rapi.testutils._FormatHeaders([
"%s: %s" % (http.HTTP_CONTENT_TYPE, http.HTTP_APP_JSON),
])
(code, _, data) = self._Test(http.HTTP_GET, "/", headers, body)
self.assertEqual(code, http.HttpBadRequest.code)
self.assertEqual(data["message"], "Unable to parse JSON data")
def testUnsupportedAuthScheme(self):
headers = rapi.testutils._FormatHeaders([
"%s: %s" % (http.HTTP_AUTHORIZATION, "Unsupported scheme"),
])
(code, _, _) = self._Test(http.HTTP_POST, "/2/instances", headers, "")
self.assertEqual(code, http.HttpUnauthorized.code)
def testIncompleteBasicAuth(self):
headers = rapi.testutils._FormatHeaders([
"%s: Basic" % http.HTTP_AUTHORIZATION,
])
(code, _, data) = self._Test(http.HTTP_POST, "/2/instances", headers, "")
self.assertEqual(code, http.HttpBadRequest.code)
self.assertEqual(data["message"],
"Basic authentication requires credentials")
def testInvalidBasicAuth(self):
for auth in ["!invalid=base!64.", testutils.b64encode_string(" "),
testutils.b64encode_string("missingcolonchar")]:
headers = rapi.testutils._FormatHeaders([
"%s: Basic %s" % (http.HTTP_AUTHORIZATION, auth),
])
(code, _, data) = self._Test(http.HTTP_POST, "/2/instances", headers, "")
self.assertEqual(code, http.HttpUnauthorized.code)
@staticmethod
def _MakeAuthHeaders(username, password, correct_password):
if correct_password:
pw = password
else:
pw = "wrongpass"
authtok = "%s:%s" % (username, pw)
return rapi.testutils._FormatHeaders([
"%s: Basic %s" % (http.HTTP_AUTHORIZATION,
testutils.b64encode_string(authtok)),
"%s: %s" % (http.HTTP_CONTENT_TYPE, http.HTTP_APP_JSON),
])
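  # e.g. _MakeAuthHeaders("admin", "secret", True) produces an
  # "Authorization: Basic YWRtaW46c2VjcmV0" header ("admin:secret" base64
  # encoded) plus a JSON Content-Type header (illustrative values only).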
def testQueryAuth(self):
username = "admin"
password = "2046920054"
header_fn = compat.partial(self._MakeAuthHeaders, username, password)
def _LookupUserNoWrite(name):
if name == username:
return http.auth.PasswordFileUser(name, password, [])
else:
return None
for access in [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]:
def _LookupUserWithWrite(name):
if name == username:
return http.auth.PasswordFileUser(name, password, [
access,
])
else:
return None
for qr in constants.QR_VIA_RAPI:
# The /2/query resource has somewhat special rules for authentication as
# it can be used to retrieve critical information
path = "/2/query/%s" % qr
for method in rapi.baserlib._SUPPORTED_METHODS:
# No authorization
(code, _, _) = self._Test(method, path, "", "")
if method in (http.HTTP_DELETE, http.HTTP_POST):
self.assertEqual(code, http.HttpNotImplemented.code)
continue
self.assertEqual(code, http.HttpUnauthorized.code)
# Incorrect user
(code, _, _) = self._Test(method, path, header_fn(True), "",
user_fn=self._LookupWrongUser)
self.assertEqual(code, http.HttpUnauthorized.code)
# User has no write access, but the password is correct
(code, _, _) = self._Test(method, path, header_fn(True), "",
user_fn=_LookupUserNoWrite)
self.assertEqual(code, http.HttpForbidden.code)
# Wrong password and no write access
(code, _, _) = self._Test(method, path, header_fn(False), "",
user_fn=_LookupUserNoWrite)
self.assertEqual(code, http.HttpUnauthorized.code)
# Wrong password with write access
(code, _, _) = self._Test(method, path, header_fn(False), "",
user_fn=_LookupUserWithWrite)
self.assertEqual(code, http.HttpUnauthorized.code)
# Prepare request information
if method == http.HTTP_PUT:
reqpath = path
body = serializer.DumpJson({
"fields": ["name"],
})
elif method == http.HTTP_GET:
reqpath = "%s?fields=name" % path
body = ""
else:
self.fail("Unknown method '%s'" % method)
# User has write access, password is correct
(code, _, data) = self._Test(method, reqpath, header_fn(True), body,
user_fn=_LookupUserWithWrite,
luxi_client=_FakeLuxiClientForQuery)
self.assertEqual(code, http.HTTP_OK)
self.assertTrue(objects.QueryResponse.FromDict(data))
def testConsole(self):
path = "/2/instances/inst1.example.com/console"
for method in rapi.baserlib._SUPPORTED_METHODS:
for reqauth in [False, True]:
# No authorization
(code, _, _) = self._Test(method, path, "", "", reqauth=reqauth)
if method == http.HTTP_GET or reqauth:
self.assertEqual(code, http.HttpUnauthorized.code)
else:
self.assertEqual(code, http.HttpNotImplemented.code)
class _FakeLuxiClientForQuery:
def __init__(self, *args, **kwargs):
pass
def Query(self, *args):
return objects.QueryResponse(fields=[])
if __name__ == "__main__":
testutils.GanetiTestProgram()
| bsd-2-clause | -1,908,241,432,026,573,000 | 35.611111 | 80 | 0.647142 | false |
sdss/marvin | python/marvin/utils/datamodel/docudatamodel.py | 1 | 14193 | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-11-21 11:56:56
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-07-19 15:42:46
from __future__ import print_function, division, absolute_import
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils import statemachine
import traceback
def _indent(text, level=1):
''' Format Bintypes '''
prefix = ' ' * (4 * level)
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return ''.join(prefixed_lines())
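# For example, _indent('* - Name', level=2) prefixes the line with eight
# spaces, nesting it two levels deep in the generated RST.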
def _format_datacubes(datacubes):
''' Format Datacubes table '''
yield '.. list-table:: Datacubes'
yield _indent(':widths: 15 50 50 10 10 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - Ivar')
yield _indent(' - Mask')
yield _indent(' - FITS')
yield _indent(' - DB')
for datacube in datacubes:
dbcolumn = '{0}.{1}'.format(datacube.db_table, datacube.db_column())
yield _indent('* - {0}'.format(datacube.name))
yield _indent(' - {0}'.format(datacube.description))
yield _indent(' - {0}'.format(datacube.unit.to_string()))
yield _indent(' - {0}'.format(datacube.has_ivar()))
yield _indent(' - {0}'.format(datacube.has_mask()))
yield _indent(' - {0}'.format(datacube.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_rss(rss):
''' Format Rss table '''
yield '.. list-table:: Rss'
yield _indent(':widths: 15 50 50 10 10 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - Ivar')
yield _indent(' - Mask')
yield _indent(' - FITS')
yield _indent(' - DB')
for rs in rss:
dbcolumn = '{0}.{1}'.format(rs.db_table, rs.db_column())
yield _indent('* - {0}'.format(rs.name))
yield _indent(' - {0}'.format(rs.description))
yield _indent(' - {0}'.format(rs.unit.to_string()))
yield _indent(' - {0}'.format(rs.has_ivar()))
yield _indent(' - {0}'.format(rs.has_mask()))
yield _indent(' - {0}'.format(rs.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_spectra(spectra):
''' Format Spectra '''
yield '.. topic:: Spectra'
yield '.. list-table:: Spectra'
yield _indent(':widths: 15 100 20 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - FITS')
yield _indent(' - DB')
for spectrum in spectra:
dbcolumn = '{0}.{1}'.format(spectrum.db_table, spectrum.db_column())
yield _indent('* - {0}'.format(spectrum.name))
yield _indent(' - {0}'.format(spectrum.description))
yield _indent(' - {0}'.format(spectrum.unit.to_string()))
yield _indent(' - {0}'.format(spectrum.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_bintypes(bintypes):
''' Format Bintypes '''
yield '.. list-table:: Bintypes'
yield _indent(':widths: 15 100 10')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Binned')
for bintype in bintypes:
yield _indent('* - {0}'.format(bintype.name))
yield _indent(' - {0}'.format(bintype.description))
yield _indent(' - {0}'.format(bintype.binned))
yield ''
def _format_templates(templates):
''' Format Templates '''
yield '.. list-table:: Templates'
yield _indent(':widths: 15 100')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
for template in templates:
yield _indent('* - {0}'.format(template.name))
yield _indent(' - {0}'.format(template.description))
yield ''
def _format_models(models):
''' Format Models '''
yield '.. list-table:: Models'
yield _indent(':widths: 15 100 50 20 15 15')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - BinId')
yield _indent(' - Ivar')
yield _indent(' - Mask')
for model in models:
yield _indent('* - {0}'.format(model.name))
yield _indent(' - {0}'.format(model.description))
yield _indent(' - {0}'.format(model.unit))
yield _indent(' - {0}'.format(model.binid.name))
yield _indent(' - {0}'.format(model.has_ivar()))
yield _indent(' - {0}'.format(model.has_mask()))
yield ''
def _format_properties(properties):
''' Format Properties '''
exts = properties.extensions
yield '.. list-table:: Properties'
yield _indent(':widths: 15 100 100 15 15 50 100')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Channels')
yield _indent(' - Description')
yield _indent(' - Ivar')
yield _indent(' - Mask')
yield _indent(' - FITS')
yield _indent(' - DB')
for prop in exts:
yield _indent('* - {0}'.format(prop.name))
if 'MultiChannelProperty' in str(prop.__class__):
channels = ', '.join([c.name for c in prop.channels])
dbcolumn = ', '.join(['{0}.{1}'.format(prop.db_table, c) for c in prop.db_columns()])
else:
channels = prop.channel
dbcolumn = '{0}.{1}'.format(prop.db_table, prop.db_column())
yield _indent(' - {0}'.format(channels))
yield _indent(' - {0}'.format(prop.description))
yield _indent(' - {0}'.format(prop.ivar))
yield _indent(' - {0}'.format(prop.mask))
yield _indent(' - {0}'.format(prop.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_parameters(parameters):
''' Format Query Parameters '''
yield '.. topic:: Query Parameters'
yield '.. list-table:: Query Parameters'
yield _indent(':widths: 25 50 10 20 20 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Group')
yield _indent(' - Full Name')
yield _indent(' - Best')
yield _indent(' - Name')
yield _indent(' - DB Schema')
yield _indent(' - DB Table')
yield _indent(' - DB Column')
for param in parameters:
yield _indent('* - {0}'.format(param.group))
yield _indent(' - {0}'.format(param.full))
yield _indent(' - {0}'.format(param.best))
yield _indent(' - {0}'.format(param.name))
yield _indent(' - {0}'.format(param.db_schema))
yield _indent(' - {0}'.format(param.db_table))
yield _indent(' - {0}'.format(param.db_column))
yield ''
def _format_schema(schema):
''' Format a maskbit schema '''
schema_dict = schema.to_dict()
indices = schema_dict['bit'].keys()
yield '.. list-table:: Schema'
yield _indent(':widths: 5 50 50')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Bit')
yield _indent(' - Label')
yield _indent(' - Description')
for index in indices:
yield _indent('* - {0}'.format(schema_dict['bit'][index]))
yield _indent(' - {0}'.format(schema_dict['label'][index].strip()))
yield _indent(' - {0}'.format(schema_dict['description'][index].strip()))
yield ''
def _format_bitmasks(maskbit, bittype):
''' Format Maskbits '''
for name, mask in maskbit.items():
if bittype.lower() in name.lower():
#yield '.. program:: {0}'.format(name)
yield '{0}: {1}'.format(name, mask.description)
yield ''
for line in _format_schema(mask.schema):
yield line
def _format_vacs(vacs, release):
    ''' Format VACs table '''
yield '.. list-table:: VACs'
yield _indent(':widths: 20 10 50')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Version')
yield _indent(' - Description')
for vac in vacs:
yield _indent('* - {0}'.format(vac.name))
yield _indent(' - {0}'.format(vac.version[release]))
yield _indent(' - {0}'.format(vac.description))
yield ''
def _format_command(name, command, **kwargs):
"""Format the output of `click.Command`."""
# docstring
# yield command.__doc__
# yield ''
# bintypes
if 'bintypes' in kwargs:
for line in _format_bintypes(command.bintypes):
yield line
# templates
if 'templates' in kwargs:
for line in _format_templates(command.templates):
yield line
# models
if 'models' in kwargs:
for line in _format_models(command.models):
yield line
# properties
if 'properties' in kwargs:
for line in _format_properties(command.properties):
yield line
# spectra
if 'spectra' in kwargs:
for line in _format_spectra(command.spectra):
yield line
# datacubes
if 'datacubes' in kwargs:
for line in _format_datacubes(command.datacubes):
yield line
# rss
if 'rss' in kwargs:
rssdm = kwargs.get('rssdm')
for line in _format_rss(rssdm.rss):
yield line
# query parameters
if 'parameters' in kwargs:
for line in _format_parameters(command.parameters):
yield line
# bitmasks
if 'bitmasks' in kwargs:
for line in _format_bitmasks(command.bitmasks, kwargs.get('bittype', None)):
yield line
# vacs
if 'vac' in kwargs:
vac_release = kwargs.get('vac', None)
if vac_release and vac_release in command:
vacdm = command[vac_release]
for line in _format_vacs(vacdm.vacs, vacdm.release):
yield line
class DataModelDirective(rst.Directive):
has_content = False
required_arguments = 1
option_spec = {
'prog': directives.unchanged_required,
'title': directives.unchanged,
'subtitle': directives.unchanged,
'description': directives.unchanged,
'bintypes': directives.flag,
'templates': directives.flag,
'models': directives.flag,
'properties': directives.flag,
'datacubes': directives.flag,
'rss': directives.flag,
'spectra': directives.flag,
'bitmasks': directives.flag,
'parameters': directives.flag,
'bittype': directives.unchanged,
'vac': directives.unchanged,
}
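    # A usage sketch for this directive in a Sphinx .rst page (the module
    # path and option values here are hypothetical; ':prog:' is required by
    # run(), and the argument must have the "module:attribute" form checked
    # in _load_module below):
    #
    #   .. datamodel:: marvin.utils.datamodel.dap:datamodel
    #      :prog: DAP datamodel
    #      :title: MPL-6
    #      :properties:
    #      :bitmasks:
    #      :bittype: Quality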
def _load_module(self, module_path):
"""Load the module."""
# __import__ will fail on unicode,
# so we ensure module path is a string here.
module_path = str(module_path)
try:
module_name, attr_name = module_path.split(':', 1)
except ValueError: # noqa
raise self.error('"{0}" is not of format "module:parser"'.format(module_path))
try:
mod = __import__(module_name, globals(), locals(), [attr_name])
except (Exception, SystemExit) as exc: # noqa
err_msg = 'Failed to import "{0}" from "{1}". '.format(attr_name, module_name)
if isinstance(exc, SystemExit):
err_msg += 'The module appeared to call sys.exit()'
else:
err_msg += 'The following exception was raised:\n{0}'.format(traceback.format_exc())
raise self.error(err_msg)
if not hasattr(mod, attr_name):
raise self.error('Module "{0}" has no attribute "{1}"'.format(module_name, attr_name))
return getattr(mod, attr_name)
def _generate_nodes(self, name, command, parent=None, options={}):
"""Generate the relevant Sphinx nodes.
Format a `click.Group` or `click.Command`.
:param name: Name of command, as used on the command line
:param command: Instance of `click.Group` or `click.Command`
:param parent: Instance of `click.Context`, or None
:param show_nested: Whether subcommands should be included in output
:returns: A list of nested docutil nodes
"""
# Title
source_name = name
content = [nodes.title(text=name)]
subtitle = self.options.get('subtitle', None)
description = self.options.get('description', None)
if subtitle:
content.append(nodes.subtitle(text=subtitle))
if description:
content.append(nodes.paragraph(text=description))
section = nodes.section(
'',
*content,
ids=[nodes.make_id(source_name)],
names=[nodes.fully_normalize_name(source_name)])
# Summary
result = statemachine.ViewList()
lines = _format_command(name, command, **options)
for line in lines:
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
return [section]
def run(self):
self.env = self.state.document.settings.env
# load the designated class object from the module file
command = self._load_module(self.arguments[0])
# do something special to access the RSS datamodel
if 'rss' in self.options:
rssarg = self.arguments[0].split(':')[0] + ':datamodel_rss'
rssdms = self._load_module(rssarg)
rssdm = rssdms[command.release]
self.options['rssdm'] = rssdm
if 'prog' in self.options:
prog_name = self.options.get('prog')
else:
raise self.error(':prog: must be specified')
return self._generate_nodes(prog_name, command, None, options=self.options)
def setup(app):
app.add_directive('datamodel', DataModelDirective)
| bsd-3-clause | 6,809,721,066,906,021,000 | 30.262115 | 100 | 0.573804 | false |
richardliaw/ray | rllib/agents/ddpg/ddpg_torch_model.py | 1 | 7776 | import numpy as np
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch, get_activation_fn
torch, nn = try_import_torch()
class DDPGTorchModel(TorchModelV2, nn.Module):
"""Extension of standard TorchModelV2 for DDPG.
Data flow:
obs -> forward() -> model_out
model_out -> get_policy_output() -> pi(s)
model_out, actions -> get_q_values() -> Q(s, a)
model_out, actions -> get_twin_q_values() -> Q_twin(s, a)
Note that this class by itself is not a valid model unless you
implement forward() in a subclass."""
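    # A minimal usage sketch (hypothetical tensor shapes; `model` is assumed
    # to be a subclass that implements forward()):
    #
    #   model_out, _ = model({"obs": obs_batch}, [], None)
    #   actions = model.get_policy_output(model_out)       # pi(s)
    #   q_values = model.get_q_values(model_out, actions)  # Q(s, a)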
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
actor_hidden_activation="relu",
actor_hiddens=(256, 256),
critic_hidden_activation="relu",
critic_hiddens=(256, 256),
twin_q=False,
add_layer_norm=False):
"""Initialize variables of this model.
Extra model kwargs:
actor_hidden_activation (str): activation for actor network
actor_hiddens (list): hidden layers sizes for actor network
critic_hidden_activation (str): activation for critic network
critic_hiddens (list): hidden layers sizes for critic network
twin_q (bool): build twin Q networks.
add_layer_norm (bool): Enable layer norm (for param noise).
Note that the core layers for forward() are not defined here, this
only defines the layers for the output heads. Those layers for
forward() should be defined in subclasses of DDPGTorchModel.
"""
nn.Module.__init__(self)
super(DDPGTorchModel, self).__init__(obs_space, action_space,
num_outputs, model_config, name)
self.bounded = np.logical_and(self.action_space.bounded_above,
self.action_space.bounded_below).any()
low_action = nn.Parameter(
torch.from_numpy(self.action_space.low).float())
low_action.requires_grad = False
self.register_parameter("low_action", low_action)
action_range = nn.Parameter(
torch.from_numpy(self.action_space.high -
self.action_space.low).float())
action_range.requires_grad = False
self.register_parameter("action_range", action_range)
self.action_dim = np.product(self.action_space.shape)
# Build the policy network.
self.policy_model = nn.Sequential()
ins = num_outputs
self.obs_ins = ins
activation = get_activation_fn(
actor_hidden_activation, framework="torch")
for i, n in enumerate(actor_hiddens):
self.policy_model.add_module(
"action_{}".format(i),
SlimFC(
ins,
n,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=activation))
# Add LayerNorm after each Dense.
if add_layer_norm:
self.policy_model.add_module("LayerNorm_A_{}".format(i),
nn.LayerNorm(n))
ins = n
self.policy_model.add_module(
"action_out",
SlimFC(
ins,
self.action_dim,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=None))
# Use sigmoid to scale to [0,1], but also double magnitude of input to
# emulate behaviour of tanh activation used in DDPG and TD3 papers.
# After sigmoid squashing, re-scale to env action space bounds.
class _Lambda(nn.Module):
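            # Note: `self_` is the _Lambda module instance; `self`, captured
            # from the enclosing scope, supplies action_range and low_action.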
def forward(self_, x):
sigmoid_out = nn.Sigmoid()(2.0 * x)
squashed = self.action_range * sigmoid_out + self.low_action
return squashed
# Only squash if we have bounded actions.
if self.bounded:
self.policy_model.add_module("action_out_squashed", _Lambda())
# Build the Q-net(s), including target Q-net(s).
def build_q_net(name_):
activation = get_activation_fn(
critic_hidden_activation, framework="torch")
# For continuous actions: Feed obs and actions (concatenated)
# through the NN. For discrete actions, only obs.
q_net = nn.Sequential()
ins = self.obs_ins + self.action_dim
for i, n in enumerate(critic_hiddens):
q_net.add_module(
"{}_hidden_{}".format(name_, i),
SlimFC(
ins,
n,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=activation))
ins = n
q_net.add_module(
"{}_out".format(name_),
SlimFC(
ins,
1,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=None))
return q_net
self.q_model = build_q_net("q")
if twin_q:
self.twin_q_model = build_q_net("twin_q")
else:
self.twin_q_model = None
def get_q_values(self, model_out, actions):
"""Return the Q estimates for the most recent forward pass.
This implements Q(s, a).
Args:
model_out (Tensor): obs embeddings from the model layers, of shape
[BATCH_SIZE, num_outputs].
actions (Tensor): Actions to return the Q-values for.
Shape: [BATCH_SIZE, action_dim].
Returns:
tensor of shape [BATCH_SIZE].
"""
return self.q_model(torch.cat([model_out, actions], -1))
def get_twin_q_values(self, model_out, actions):
"""Same as get_q_values but using the twin Q net.
This implements the twin Q(s, a).
Args:
model_out (Tensor): obs embeddings from the model layers, of shape
[BATCH_SIZE, num_outputs].
actions (Optional[Tensor]): Actions to return the Q-values for.
Shape: [BATCH_SIZE, action_dim].
Returns:
tensor of shape [BATCH_SIZE].
"""
return self.twin_q_model(torch.cat([model_out, actions], -1))
def get_policy_output(self, model_out):
"""Return the action output for the most recent forward pass.
        This outputs the support for pi(s). For continuous action spaces, this
        is the action directly. For discrete, it is the mean / std dev.
Args:
model_out (Tensor): obs embeddings from the model layers, of shape
[BATCH_SIZE, num_outputs].
Returns:
tensor of shape [BATCH_SIZE, action_out_size]
"""
return self.policy_model(model_out)
def policy_variables(self, as_dict=False):
"""Return the list of variables for the policy net."""
if as_dict:
return self.policy_model.state_dict()
return list(self.policy_model.parameters())
def q_variables(self, as_dict=False):
"""Return the list of variables for Q / twin Q nets."""
if as_dict:
return {
**self.q_model.state_dict(),
**(self.twin_q_model.state_dict() if self.twin_q_model else {})
}
return list(self.q_model.parameters()) + \
(list(self.twin_q_model.parameters()) if self.twin_q_model else [])
| apache-2.0 | 8,141,705,014,407,034,000 | 37.88 | 79 | 0.547968 | false |
dbtsai/python-mimeparse | mimeparse_test.py | 1 | 2663 | #!/usr/bin/env python
"""
Python tests for Mime-Type Parser.
This module loads a json file and converts the tests specified therein to a set
of PyUnitTestCases. Then it uses PyUnit to run them and report their status.
"""
import json
import unittest
import mimeparse
__version__ = "0.1"
__author__ = 'Ade Oshineye'
__email__ = "[email protected]"
__credits__ = ""
class MimeParseTestCase(unittest.TestCase):
def setUp(self):
super(MimeParseTestCase, self).setUp()
with open("testdata.json") as f:
self.test_data = json.load(f)
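        # Expected shape of testdata.json, inferred from the accessors below
        # (a sketch, not the authoritative schema):
        # {
        #   "parse_media_range": [[args, expected], ...],
        #   "quality": [[args, expected], ...],
        #   "best_match": [[args, expected, description], ...],
        #   "parse_mime_type": [[args, expected], ...]
        # }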
def _test_parse_media_range(self, args, expected):
expected = tuple(expected)
result = mimeparse.parse_media_range(args)
message = "Expected: '%s' but got %s" % (expected, result)
self.assertEqual(expected, result, message)
def _test_quality(self, args, expected):
result = mimeparse.quality(args[0], args[1])
message = "Expected: '%s' but got %s" % (expected, result)
self.assertEqual(expected, result, message)
def _test_best_match(self, args, expected, description):
if expected is None:
self.assertRaises(mimeparse.MimeTypeParseException,
mimeparse.best_match, args[0], args[1])
else:
result = mimeparse.best_match(args[0], args[1])
message = \
"Expected: '%s' but got %s. Description for this test: %s" % \
(expected, result, description)
self.assertEqual(expected, result, message)
def _test_parse_mime_type(self, args, expected):
if expected is None:
self.assertRaises(mimeparse.MimeTypeParseException,
mimeparse.parse_mime_type, args)
else:
expected = tuple(expected)
result = mimeparse.parse_mime_type(args)
message = "Expected: '%s' but got %s" % (expected, result)
self.assertEqual(expected, result, message)
def test_parse_media_range(self):
for args, expected in self.test_data['parse_media_range']:
self._test_parse_media_range(args, expected)
def test_quality(self):
for args, expected in self.test_data['quality']:
self._test_quality(args, expected)
def test_best_match(self):
for args, expected, description in self.test_data['best_match']:
self._test_best_match(args, expected, description)
def test_parse_mime_type(self):
for args, expected in self.test_data['parse_mime_type']:
self._test_parse_mime_type(args, expected)
if __name__ == '__main__':
unittest.main()
| mit | -5,316,706,622,492,868,000 | 33.584416 | 79 | 0.61472 | false |
FedoraScientific/salome-yacs | src/pyqt/gui/CItems.py | 1 | 17573 | # Copyright (C) 2006-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
import sys,traceback
from qt import *
from qtcanvas import *
import pilot
import pypilot
import Item
import math
dispatcher=pilot.Dispatcher.getDispatcher()
class TextItem(QCanvasText):
"""A text in a composite object"""
def __init__(self,obj,canvas):
QCanvasText.__init__(self,canvas)
self.obj=obj
self.item=None
def getObj(self):
"""The composite object which contains the text"""
return self.obj
def moveBy(self,dx,dy):
"""Request the text move by x,y"""
if self.obj:
#the text is a part of a composite object
self.obj.moveBy(dx,dy)
else:
#the text is independant
self.myMove(dx,dy)
def myMove(self,dx,dy):
"""The real move"""
QCanvasText.moveBy(self,dx,dy)
def selected(self):
"""The canvas item has been selected"""
if self.obj:
self.obj.selected()
class PointItem(QCanvasEllipse):
def __init__(self,obj,x,y,canvas):
"""Create a point contained in a composite line (obj)"""
QCanvasEllipse.__init__(self,6,6,canvas)
self.obj=obj
self.item=None
self.inline=None
self.outline=None
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setX(x)
self.setY(y)
self.setVisible(True)
def setInline(self,inline):
self.inline=inline
if inline.z() >= self.z():
self.setZ(inline.z()+1)
def setOutline(self,outline):
self.outline=outline
if outline.z() >= self.z():
self.setZ(outline.z()+1)
def moveBy(self,dx,dy):
"""Request the point move by x,y"""
self.myMove(dx,dy)
def myMove(self,dx,dy):
"""The real move"""
QCanvasEllipse.moveBy(self,dx,dy)
if self.outline:
self.outline.setFromPoint( int(self.x()), int(self.y()) )
if self.inline:
self.inline.setToPoint( int(self.x()), int(self.y()) )
def getObj(self):
"""The object which contains the point"""
return self.obj
def handleDoubleClick(self,pos):
self.obj.deletePoint(self,pos)
#def __del__(self):
# print "PointItem.__del__"
def clear(self):
"""To remove from canvas"""
self.setCanvas(None)
self.obj=None
self.inline=None
self.outline=None
def selected(self):
"""The canvas item has been selected"""
class LineItem(QCanvasLine):
"""A line between 2 points"""
def __init__(self,obj,fromPoint, toPoint,canvas):
QCanvasLine.__init__(self,canvas)
self.obj=obj
self.item=None
self.fromPoint=fromPoint
self.toPoint=toPoint
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setPoints(int(fromPoint.x()),int(fromPoint.y()), int(toPoint.x()), int(toPoint.y()))
self.setZ(min(fromPoint.z(),toPoint.z())-1)
self.setVisible(True)
self.arrow = QCanvasPolygon(self.canvas())
self.arrow.setBrush(QBrush(Qt.black))
self.setArrow()
self.arrow.show()
def setFromPoint(self,x,y):
self.setPoints(x,y,self.endPoint().x(),self.endPoint().y())
self.setArrow()
def setToPoint(self,x,y):
self.setPoints(self.startPoint().x(), self.startPoint().y(),x,y)
self.setArrow()
def moveBy(self,dx,dy):
"""Disable line move"""
pass
def setArrow(self):
x1,y1=self.startPoint().x(),self.startPoint().y()
x2,y2=self.endPoint().x(),self.endPoint().y()
d=math.hypot(x2-x1,y2-y1)
sina=(y2-y1)/d
cosa=(x2-x1)/d
x=(x1+x2)/2.
y=(y1+y2)/2.
l,e=6,3
pa=QPointArray(3)
pa.setPoint(0, QPoint(x+l*cosa,y+l*sina))
pa.setPoint(1, QPoint(x-e*sina,y+e*cosa))
pa.setPoint(2, QPoint(x+e*sina,y-e*cosa))
self.arrow.setPoints(pa)
def getObj(self):
"""The object which contains the line"""
return self.obj
def handleDoubleClick(self,pos):
#split the line
self.obj.splitline(self,pos)
#def __del__(self):
# print "LineItem.__del__"
def clear(self):
"""To remove from canvas"""
self.setCanvas(None)
self.fromPoint=None
self.toPoint=None
self.obj=None
self.arrow.setCanvas(None)
self.arrow=None
def selected(self):
"""The canvas item has been selected"""
class LinkItem:
def __init__(self,fromPort, toPort,canvas):
self.fromPort=fromPort
self.toPort=toPort
self.canvas=canvas
self.item=None
fromPort.addOutLink(self)
toPort.addInLink(self)
self.lines=[]
self.points=[]
self.lines.append(LineItem(self,fromPort, toPort,canvas))
def deletePoint(self,point,pos):
"""Delete intermediate point"""
if point not in self.points:
return
self.points.remove(point)
inline=point.inline
outline=point.outline
inline.toPoint=outline.toPoint
inline.setToPoint(outline.toPoint.x(),outline.toPoint.y())
self.lines.remove(outline)
if inline.toPoint in self.points:
inline.toPoint.setInline(inline)
#remove from canvas
point.clear()
outline.clear()
def clearPoints(self):
#make a copy as deletePoint modify self.points
for point in self.points[:]:
self.deletePoint(point,0)
def splitline(self,line,pos):
self.splitLine(line,pos.x(),pos.y())
def splitLine(self,line,x,y):
"""Split line at position x,y"""
#The new point
point=PointItem(self,x,y,self.canvas)
self.points.append(point)
i=self.lines.index(line)
newline=LineItem(self,point,line.toPoint,self.canvas)
if line.toPoint in self.points:
#line not connected to port : reconnect newline
line.toPoint.setInline(newline)
self.lines.insert(i+1,newline)
line.setToPoint(x,y)
line.toPoint=point
point.setInline(line)
point.setOutline(newline)
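    # E.g. a link first drawn as one segment P0->P2: splitLine(line, x, y)
    # inserts a draggable PointItem P1 at (x, y) and rewires the polyline to
    # P0->P1 and P1->P2, leaving the port endpoints untouched.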
def setFromPoint(self,x,y):
first=self.lines[0]
first.setFromPoint(x,y)
def setToPoint(self,x,y):
last=self.lines[-1]
last.setToPoint(x,y)
def moveBy(self,dx,dy):
pass
def popup(self,canvasView):
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Link Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Delete", self.delete)
return menu
def delete(self):
print "delete link"
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), pos.x()+10, pos.y()+10)
s = QString( "link: "+self.fromPort.port.getNode().getName() +":"+self.fromPort.port.getName()+"->"+self.toPort.port.getNode().getName()+":"+self.toPort.port.getName() )
view.tip( r, s )
def selected(self):
"""The canvas item has been selected"""
class ControlLinkItem(LinkItem):
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), pos.x()+10, pos.y()+10)
s = QString( "link: "+self.fromPort.port.getNode().getName()+"->"+self.toPort.port.getNode().getName())
view.tip( r, s )
#QToolTip(view).tip( r, s )
class ControlItem(QCanvasRectangle):
def __init__(self,node,port,canvas):
QCanvasRectangle.__init__(self,canvas)
self.setSize(6,6)
self.port=port
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setZ(node.z()+1)
self.node=node
self.item=Item.adapt(self.port)
def moveBy(self,dx,dy):
self.node.moveBy(dx,dy)
def myMove(self,dx,dy):
QCanvasRectangle.moveBy(self,dx,dy)
def getObj(self):
return self
def popup(self,canvasView):
self.context=canvasView
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Port Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Connect", self.connect)
return menu
def connect(self):
print "ControlItem.connect",self.context
print self.port
item=Item.adapt(self.port)
print item
item.connect()
self.context.connecting(item)
#self.context.connecting(self)
def link(self,obj):
#Protocol to link 2 objects (ports, at first)
#First, notify the canvas View (or any view that can select) we are connecting (see method connect above)
#Second (and last) make the link in the link method of object that was declared connecting
print "link:",obj
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "gate:")
view.tip( r, s )
def selected(self):
"""The canvas item has been selected"""
#print "control port selected"
item=Item.adapt(self.port)
item.selected()
class InControlItem(ControlItem):
def __init__(self,node,port,canvas):
ControlItem.__init__(self,node,port,canvas)
self.__inList=[]
def myMove(self,dx,dy):
ControlItem.myMove(self,dx,dy)
for link in self.__inList:
link.setToPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,OutControlItem):
#Connection possible
l=LinkItem(obj,self,self.canvas())
def addInLink(self,link):
self.__inList.append(link)
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "ingate:")
view.tip( r, s )
#QToolTip(view).tip( r, s )
class OutControlItem(ControlItem):
def __init__(self,node,port,canvas):
ControlItem.__init__(self,node,port,canvas)
self.__outList=[]
def myMove(self,dx,dy):
ControlItem.myMove(self,dx,dy)
for link in self.__outList:
link.setFromPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,InControlItem):
#Connection possible
l=LinkItem(self,obj,self.canvas())
def addOutLink(self,link):
self.__outList.append(link)
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "outgate:")
view.tip( r, s )
#QToolTip(view).tip( r, s )
def links(self):
return self.__outList
class PortItem(QCanvasEllipse):
def __init__(self,node,port,canvas):
QCanvasEllipse.__init__(self,6,6,canvas)
self.port=port
self.item=None
self.item=Item.adapt(self.port)
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setZ(node.z()+1)
self.node=node
def moveBy(self,dx,dy):
self.node.moveBy(dx,dy)
def myMove(self,dx,dy):
QCanvasEllipse.moveBy(self,dx,dy)
def getObj(self):
return self
def popup(self,canvasView):
self.context=canvasView
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Port Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Connect", self.connect)
return menu
def connect(self):
print "PortItem.connect",self.context
print self.port
item=Item.adapt(self.port)
print item
self.context.connecting(item)
#self.context.connecting(self)
def link(self,obj):
print "PortItem.link:",obj
def tooltip(self,view,pos):
r = QRect(pos.x(),pos.y(),self.width(), self.height())
t=self.port.edGetType()
s = QString( "port: " + self.port.getName() + ":" + t.name())
view.tip( r, s )
def selected(self):
"""The canvas item has been selected"""
#print "port selected"
item=Item.adapt(self.port)
item.selected()
class InPortItem(PortItem):
def __init__(self,node,port,canvas):
PortItem.__init__(self,node,port,canvas)
self.__inList=[]
def myMove(self,dx,dy):
PortItem.myMove(self,dx,dy)
for link in self.__inList:
link.setToPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,OutPortItem):
#Connection possible
l=LinkItem(obj,self,self.canvas())
def addInLink(self,link):
self.__inList.append(link)
class OutPortItem(PortItem):
def __init__(self,node,port,canvas):
PortItem.__init__(self,node,port,canvas)
self.__outList=[]
def myMove(self,dx,dy):
PortItem.myMove(self,dx,dy)
for link in self.__outList:
link.setFromPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,InPortItem):
#Connection possible
l=LinkItem(self,obj,self.canvas())
def addOutLink(self,link):
self.__outList.append(link)
def links(self):
return self.__outList
class InStreamItem(InPortItem):
def __init__(self,node,port,canvas):
InPortItem.__init__(self,node,port,canvas)
self.setBrush(QBrush(Qt.green))
class OutStreamItem(OutPortItem):
def __init__(self,node,port,canvas):
OutPortItem.__init__(self,node,port,canvas)
self.setBrush(QBrush(Qt.green))
class Cell(QCanvasRectangle,pypilot.PyObserver):
colors={
"pink":Qt.cyan,
"green":Qt.green,
"magenta":Qt.magenta,
"purple":Qt.darkMagenta,
"blue":Qt.blue,
"red":Qt.red,
"orange":Qt.yellow,
"grey":Qt.gray,
"white":Qt.white,
}
def __init__(self,node,canvas):
QCanvasRectangle.__init__(self,canvas)
pypilot.PyObserver.__init__(self)
self.inports=[]
self.outports=[]
self.setSize(50,50)
#node is an instance of YACS::ENGINE::Node
self.node=node
self.item=Item.adapt(self.node)
dispatcher.addObserver(self,node,"status")
self.label=TextItem(self,canvas)
self.label.setText(self.node.getName())
self.label.setFont(QFont("Helvetica",8))
rect=self.label.boundingRect()
self.label.setZ(self.z()+1)
self.label.myMove(self.x()+self.width()/2-rect.width()/2,self.y()+self.height()/2-rect.height()/2)
color= self.colors.get(node.getColorState(node.getEffectiveState()),Qt.white)
self.setBrush(QBrush(color))
dy=6
y=0
for inport in self.node.getSetOfInputPort():
p=InPortItem(self,inport,canvas)
y=y+dy
p.myMove(0,y)
self.inports.append(p)
for instream in self.node.getSetOfInputDataStreamPort():
p=InStreamItem(self,instream,canvas)
y=y+dy
p.myMove(0,y)
self.inports.append(p)
ymax=y
dy=6
y=0
for outport in self.node.getSetOfOutputPort():
p=OutPortItem(self,outport,canvas)
y=y+dy
p.myMove(50,y)
self.outports.append(p)
for outstream in self.node.getSetOfOutputDataStreamPort():
p=OutStreamItem(self,outstream,canvas)
y=y+dy
p.myMove(50,y)
self.outports.append(p)
ymax=max(y,ymax)
#Control ports
y=ymax+dy
    if y < 44: y = 44
p=InControlItem(self,self.node.getInGate(),canvas)
p.myMove(0,y)
self.inports.append(p)
self.ingate=p
p=OutControlItem(self,self.node.getOutGate(),canvas)
p.myMove(44,y)
self.outports.append(p)
self.outgate=p
y=y+dy
self.setSize(50,y)
events={
"status":QEvent.User+1,
}
def pynotify(self,object,event):
#print "pynotify",event,object
try:
evType=self.events[event]
ev=QCustomEvent(evType)
ev.setData(self)
ev.yacsEvent=event
QApplication.postEvent(self.canvas(), ev)
#request immediate processing (deadlock risk ???)
#QApplication.sendPostedEvents(self.canvas(), evType)
#print "pynotify end"
except:
#traceback.print_exc()
raise
def customEvent(self,event):
if event.yacsEvent=="status":
object=self.node
state=object.getEffectiveState()
color=object.getColorState(state)
color= self.colors.get(color,Qt.white)
self.setBrush(QBrush(color))
else:
print "Unknown custom event type:", event.type()
def moveBy(self,dx,dy):
QCanvasRectangle.moveBy(self,dx,dy)
self.label.myMove(dx,dy)
for p in self.inports:
p.myMove(dx,dy)
for p in self.outports:
p.myMove(dx,dy)
def show(self):
QCanvasRectangle.show(self)
self.label.show()
for p in self.inports:
p.show()
for p in self.outports:
p.show()
def getObj(self):
return self
def popup(self,canvasView):
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Node Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Browse", self.browse)
return menu
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "node: " + self.node.getName())
view.tip( r, s )
#QToolTip(view).tip( r, s )
def browse(self):
print "browse"
def selected(self):
"""The canvas item has been selected"""
#print "node selected"
item=Item.adapt(self.node)
item.selected()
| gpl-2.0 | -5,207,753,178,458,812,000 | 26.761453 | 174 | 0.653275 | false |
stscieisenhamer/glue | glue/app/qt/splash_screen.py | 1 | 1493 | import os
from qtpy import QtWidgets, QtGui
from qtpy.QtCore import Qt, QRect
__all__ = ['QtSplashScreen']
class QtSplashScreen(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(QtSplashScreen, self).__init__(*args, **kwargs)
self.resize(627, 310)
self.setStyleSheet("background-color:white;")
self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
self.center()
self.progress = QtWidgets.QProgressBar()
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addStretch()
self.layout.addWidget(self.progress)
pth = os.path.join(os.path.dirname(__file__), '..', '..', 'logo.png')
self.image = QtGui.QPixmap(pth)
def set_progress(self, value):
self.progress.setValue(value)
QtWidgets.qApp.processEvents() # update progress bar
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawPixmap(QRect(20, 20, 587, 229), self.image)
def center(self):
# Adapted from StackOverflow
# https://stackoverflow.com/questions/20243637/pyqt4-center-window-on-active-screen
frameGm = self.frameGeometry()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
| bsd-3-clause | 1,320,943,867,231,352,300 | 32.177778 | 111 | 0.663094 | false |
ZeitOnline/zeit.cms | src/zeit/cms/section/testing.py | 1 | 1175 | import plone.testing
import zeit.cms.repository.interfaces
import zeit.cms.section.interfaces
import zeit.cms.testcontenttype.interfaces
import zeit.cms.testing
import zope.component
import zope.interface
ZCML_LAYER = zeit.cms.testing.ZCMLLayer(
'ftesting.zcml', product_config=zeit.cms.testing.cms_product_config)
class SectionLayer(plone.testing.Layer):
defaultBases = (ZCML_LAYER,)
def testSetUp(self):
with zeit.cms.testing.site(self['functional_setup'].getRootFolder()):
repository = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
example = zeit.cms.repository.folder.Folder()
zope.interface.alsoProvides(example, IExampleSection)
repository['example'] = example
SECTION_LAYER = SectionLayer()
class IExampleSection(zeit.cms.section.interfaces.ISection):
pass
class IExampleContent(zeit.cms.interfaces.ICMSContent,
zeit.cms.section.interfaces.ISectionMarker):
pass
class IExampleTestcontent(
zeit.cms.testcontenttype.interfaces.IExampleContentType,
zeit.cms.section.interfaces.ISectionMarker):
pass
| bsd-3-clause | -5,837,318,905,584,816,000 | 27.658537 | 77 | 0.726809 | false |
google-research/google-research | m_theory/dim4/so8_supergravity_extrema/code/symmetries.py | 1 | 34942 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes residual symmetries of solutions.
As all critical points with a rank-2 simple Lie group symmetry have been
known for many years, we can restrict ourselves to a residual Lie symmetry of
Spin(3)^A x U(1)^B. This considerably simplifies the analysis.
"""
import cmath
import collections
import glob
import itertools
import math
import numpy
import os
import pprint
# CAUTION: scipy.linalg.eigh() will produce an orthonormal basis, while
# scipy.linalg.eig(), when used on a hermitean matrix, typically will not
# orthonormalize eigenvectors in degenerate eigenspaces.
# This behavior is not documented properly, but "obvious" when considering
# the underlying algorithm.
import scipy.linalg
from dim4.so8_supergravity_extrema.code import algebra
CanonicalizedSymmetry = collections.namedtuple(
'CanonicalizedSymmetry',
['u1s', # Sequence of U(1) generators, each as a 28-vector acting on [ik].
'semisimple_part', # [28, d]-array, semisimple part of the algebra.
'spin3_cartan_gens' # Cartan generators, one per spin(3) subalgebra.
])
# A `Spin8Action` tuple consists of an einsum reduction-string,
# typically of the form 'aij,aN->jiN', as well as the 1st tensor-argument
# to the corresponding contraction.
Spin8Action = collections.namedtuple(
'Spin8Action', ['einsum', 'tensor'])
class BranchingFormatter(object):
"""Base class for branching-formatters."""
def format(self, num_spin3s, branching):
return self.sum_join(self.format_irreps(num_spin3s, b) for b in branching)
def format_branching_tag(self, tag):
"""Formats tag (8, 'v') -> '8v' etc."""
tag_dim, tag_subscript = tag
return '%s%s' % (tag_dim, tag_subscript)
def sum_join(self, formatted):
return ' + '.join(formatted)
def format_multiplicity(self, multiplicity, formatted_obj):
"""Adds a multiplicity prefix to a formatted object."""
if multiplicity == 1:
return formatted_obj
return '%dx%s' % (multiplicity, formatted_obj)
def format_irreps(self, num_spin3s, irreps_part):
"""Formats a group of identical irreducible representations."""
charges, mult = irreps_part
return self.format_multiplicity(mult,
self.format_irrep(num_spin3s, charges))
def format_irrep(self, num_spin3s, charges):
"""Formats a single irreducible representation."""
if set(charges[:num_spin3s]) == {0}:
spin3_part = ''
else:
spin3_part = 'x'.join('%s' % int(round(2 * c + 1))
for c in charges[:num_spin3s])
assert all(c == int(c) for c in charges[num_spin3s:])
u1_part = ', '.join(str(int(c)) for c in charges[num_spin3s:])
if spin3_part:
return ('[%s]{%s}' % (spin3_part, u1_part) if u1_part
else '[%s]' % spin3_part)
else:
return '{%s}' % u1_part
class LaTeXBranchingFormatter(BranchingFormatter):
"""BranchingFormatter that generates LaTeX code."""
def format_branching_tag(self, tag):
"""Formats tag (8, 'v') -> '8_{v}' etc."""
tag_dim, tag_subscript = tag
return '%s_{%s}' % (tag_dim, tag_subscript)
def format_multiplicity(self, multiplicity, formatted_obj):
if multiplicity == 1:
return formatted_obj
return r'%d\times%s' % (multiplicity, formatted_obj)
def _format_charge(self, c, sub_super):
assert c == int(c)
if c == 0:
return ''
return r'%s{\scriptscriptstyle %s}' % (sub_super, '-+'[c > 0] * abs(int(c)))
def format_irrep(self, num_spin3s, charges):
# We use style such as 33^{+++}_{--},
# i.e. 1st U(1) gets superscript charges,
# 2nd U(1) gets subscript charges.
assert all(c == int(c) for c in charges[num_spin3s:])
if set(charges[:num_spin3s]) <= {0}:
spin3_part = r'\mathbf{1}' # No Spin3s, or only singlet.
elif num_spin3s == 1:
spin3_part = r'\mathbf{%s}' % int(round(2 * charges[0] + 1))
else:
spin3_part = '(%s)' % (
','.join(r'\mathbf{%d}' % int(round(2 * c + 1))
for c in charges[:num_spin3s]))
num_u1s = len(charges) - num_spin3s
u1a_part = u1b_part = ''
if num_u1s >= 1:
u1a_part = self._format_charge(charges[num_spin3s], '^')
if num_u1s == 2:
u1b_part = self._format_charge(charges[num_spin3s + 1], '_')
return spin3_part + u1a_part + u1b_part
TEXT_FORMATTER = BranchingFormatter()
LATEX_FORMATTER = LaTeXBranchingFormatter()
# The Spin(8) structure constants.
_spin8_fabc = 2 * numpy.einsum('cik,abik->abc',
algebra.su8.m_28_8_8,
# We do not need to antisymmetrize [ik] here,
# as the above factor already does this.
numpy.einsum('aij,bjk->abik',
algebra.su8.m_28_8_8,
algebra.su8.m_28_8_8))
_spin8_action56 = numpy.einsum('aik,ABik->aAB',
algebra.su8.m_28_8_8,
algebra.su8.m_action_56_56_8_8)
# Branching-rules task specification, as used for the `decomposition_tasks`
# argument to spin3u1_decompose().
# One may generally want to pass an extended arg that adds tasks which also
# decompose e.g. degenerate mass-eigenstates w.r.t. symmetry.
# These are also used to find scaling for u(1) generators that makes all
# 8v, 8s, 8c charges integral.
SPIN8_ACTION_8V = Spin8Action(einsum='aij,aN->jiN',
tensor=algebra.su8.m_28_8_8)
SPIN8_ACTION_8S = Spin8Action(
einsum='aAB,aN->BAN',
tensor=numpy.einsum('aij,ijAB->aAB',
0.25 * algebra.su8.m_28_8_8,
algebra.spin8.gamma_vvss))
SPIN8_ACTION_8C = Spin8Action(
einsum='aAB,aN->BAN',
tensor=numpy.einsum('aij,ijAB->aAB',
0.25 * algebra.su8.m_28_8_8,
algebra.spin8.gamma_vvcc))
SPIN8_ACTION_AD = Spin8Action(einsum='aAB,aN->BAN', tensor=_spin8_fabc * 0.5)
SPIN8_ACTION_FERMIONS = Spin8Action(einsum='aAB,aN->BAN',
tensor=_spin8_action56)
SPIN8_ACTION_SCALARS = Spin8Action(
einsum='aAB,aN->BAN',
tensor=0.5 * algebra.e7.spin8_action_on_v70o)
SPIN8_BRANCHINGS_VSC = (
(SPIN8_ACTION_8V,
[((8, 'v'), numpy.eye(8))]),
(SPIN8_ACTION_8S,
[((8, 's'), numpy.eye(8))]),
(SPIN8_ACTION_8C,
[((8, 'c'), numpy.eye(8))]))
# Extended branching-rules task speficication, adds 28->... branching.
SPIN8_BRANCHINGS = (
SPIN8_BRANCHINGS_VSC +
((SPIN8_ACTION_AD, [((28, ''), numpy.eye(28))]),))
def round2(x):
"""Rounds number to 2 digits, canonicalizing -0.0 to 0.0."""
return numpy.round(x, 2) or 0.0
def allclose2(p, q):
"""Determines if `p` and `q` match to two digits."""
return numpy.allclose(p, q, rtol=0.01, atol=0.01)
def aggregate_eigenvectors(eigvals, eigvecs, tolerance=1e-6):
"""Collects eigenvectors by eigenvalue into eigenspaces.
The `eigvals` and `eigvecs` arguments must be as produced by
scipy.linalg.eigh().
Args:
    eigvals: Array of eigenvalues. Must be approximately-real.
    eigvecs: Array of eigenvectors.
    tolerance: Float. Tolerance threshold for considering eigenvalues
      as degenerate.
Returns:
List of the form [(eigenvalue, eigenspace), ...],
where each `eigenspace` is a list of eigenvectors for the corresponding
eigenvalue.
Raises:
ValueError, if reality requirements are violated.
"""
if not numpy.allclose(eigvals, eigvals.real):
raise ValueError('Non-real eigenvalues.')
eigvalue_and_aggregated_eigvecs = []
for eigvalue, eigvec in sorted(zip(eigvals.real,
[tuple(v.astype(numpy.complex128))
for v in eigvecs.T]),
# Do not compare eigenvectors for degenerate
# eigenvalues. Sort by descending order.
key=lambda ev_evec: -ev_evec[0]):
for eigvalue_known, eigvecs_known in eigvalue_and_aggregated_eigvecs:
if abs(eigvalue - eigvalue_known) <= tolerance:
eigvecs_known.append(eigvec)
break
else: # Reached end of loop.
eigvalue_and_aggregated_eigvecs.append((eigvalue, [eigvec]))
return eigvalue_and_aggregated_eigvecs
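# A small worked example for aggregate_eigenvectors() (hypothetical input):
#
#   eigvals, eigvecs = scipy.linalg.eigh(numpy.diag([1.0, 1.0, -2.0]))
#   aggregate_eigenvectors(eigvals, eigvecs)
#   # -> [(1.0, [v1, v2]), (-2.0, [v3])], sorted by descending eigenvalue,
#   #    with the two degenerate eigenvectors grouped into one eigenspace.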
def get_residual_gauge_symmetry(v70, threshold=0.05):
"""Maps scalar 70-vector to [a, n]-tensor of unbroken symmetry generators.
Index `a` is a Spin(8)-adjoint index, `n` counts (orthonormal) basis vectors.
Args:
v70: The e7/su8 70-vector describing a point on the scalar manifold.
threshold: Threshold on the generalized SVD-eigenvalue for considering
a direction as belonging to the residual symmetry.
"""
su, ss, svh = scipy.linalg.svd(
numpy.einsum('avw,v->aw',
algebra.e7.spin8_action_on_v70,
v70))
del svh # Unused.
# Select those columns for which the diagonal entry is essentially zero.
return su.T[ss <= threshold].T
def get_simultaneous_eigenbasis(commuting_gens,
gen_action_einsum='abc,aN->cbN',
gen_action_tensor=_spin8_fabc,
initial_space=None,
checks=True,
tolerance=1e-6):
"""Finds a simultaneous eigenbasis for a collection of commuting generators.
Args:
commuting_gens: [28, N]-array of real and mutually orthogonal generators.
gen_action_einsum: numpy.einsum() contraction specification that maps
`gen_action_tensor` and `commuting_gens` to a set of N matrices given as
[D, D, N]-array that represent the generators on the desired space.
initial_space: [D, K]-dimensional initial space to decompose into
eigenspaces, or `None`. If `None`, uses numpy.eye(D).
checks: If True, perform internal consistency checks.
tolerance: Tolerance difference-threshold for considering
two eigenvalues as identical.
Returns:
Pair of (simultaneous_eigenbasis, charges), where `simultaneous_eigenbasis`
is a [28, K]-dimensional array of eigenvectors, and `charges` is a list
of corresponding charge-tuples.
"""
# Map generators to endomorphisms. Our conventions are such that
# the result of contracting with `gen_action_tensor` also gets multiplied
# with 1j. For spin(8) action on 8v, 8s, 8c, 28, etc., this ensures that
# with all-real generators and all-real action-tensor, we get hermitean
# endomorphisms with all-real spectrum.
gens_action = numpy.einsum(gen_action_einsum,
gen_action_tensor,
commuting_gens) * 1j
if initial_space is None:
initial_space = numpy.eye(gens_action.shape[0])
#
def recursively_split_eigenspaces(num_generator, charge_tagged_eigenspaces):
"""Recursively splits an eigenspace.
Args:
num_generator: The number of the commuting generator to use for the next
splitting-step.
charge_tagged_eigenspaces: List [(partial_charges, subspace), ...]
where `partial_charges` is a tuple of charges w.r.t. the first
`num_generator` generators (so, () for num_generator == 0),
and `subspace` is a [D, K]-array of subspace directions.
Returns:
(Ultimately), fully split charge_tagged_eigenspaces, where the
`partial_charges` tags list as many charges as there are generators.
"""
if num_generator == gens_action.shape[-1]:
return charge_tagged_eigenspaces
gen_action = gens_action[:, :, num_generator]
split_eigenspaces = []
for charges, espace in charge_tagged_eigenspaces:
if checks:
eigenspace_sprod = numpy.einsum('aj,ak->jk', espace.conj(), espace)
assert allclose2(
eigenspace_sprod,
numpy.eye(espace.shape[1])), (
'Weird Eigenspace normalization: ' + repr(
numpy.round(eigenspace_sprod, 3)))
gen_on_eigenspace = numpy.einsum(
'aj,ak->jk',
espace.conj(),
numpy.einsum('ab,bj->aj', gen_action, espace))
sub_eigvals, sub_eigvecs_T = scipy.linalg.eigh(gen_on_eigenspace)
list_approx_eigval_and_eigvecs = []
for sub_eigval, sub_eigvec in zip(sub_eigvals, sub_eigvecs_T.T):
# Lift back to original space.
eigvec = numpy.einsum('gs,s->g', espace, sub_eigvec) # |v> <v| G |v>
if checks:
gv = numpy.dot(gen_action, eigvec)
ev = sub_eigval * eigvec
assert allclose2(gv, ev), (
'Sub-Eigval is bad: g*v=%r, e*v=%r' % (
numpy.round(gv, 3), numpy.round(ev, 3)))
assert allclose2(
numpy.dot(eigvec.conj(), eigvec), 1.0), (
'Eigenvector is not normalized.')
for seen_eigval, seen_eigvecs in list_approx_eigval_and_eigvecs:
if abs(sub_eigval - seen_eigval) <= tolerance:
assert all(allclose2(0, numpy.dot(s.conj(), eigvec))
for s in seen_eigvecs), 'Non-Orthogonality'
seen_eigvecs.append(eigvec)
break
else: # Reached end of list.
list_approx_eigval_and_eigvecs.append(
(sub_eigval, # This is also the actual eigenvalue.
[eigvec]))
for eigval, eigvecs in list_approx_eigval_and_eigvecs:
eigenspace = numpy.stack(eigvecs, axis=-1)
assert allclose2(
numpy.einsum('aj,ak->jk', eigenspace.conj(), eigenspace),
numpy.eye(eigenspace.shape[-1])), 'Bad Eigenspace'
split_eigenspaces.append((charges + (eigval,), eigenspace))
return recursively_split_eigenspaces(num_generator + 1, split_eigenspaces)
#
charge_tagged_eigenspaces = recursively_split_eigenspaces(
0, [((), initial_space)])
simultaneous_eigenbasis = numpy.stack(
[evec for _, espace in charge_tagged_eigenspaces for evec in espace.T],
axis=-1)
charges = [evec_charges
for evec_charges, espace in charge_tagged_eigenspaces
for evec in espace.T]
return simultaneous_eigenbasis, charges
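# Usage sketch (assumes `u1s` is a [28, N]-array of mutually commuting,
# orthonormal generators, e.g. from decompose_reductive_lie_algebra() below):
#
#   eigvecs, charges = get_simultaneous_eigenbasis(u1s)
#   # Column eigvecs[:, k] carries the charge-tuple charges[k].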
def scale_u1_generator_to_8vsc_integral_charges(u1_gen, round_to_digits=3):
"""Scales a generator such that all 8v, 8s, 8c charges are integers."""
charges = []
for spin8action, _ in SPIN8_BRANCHINGS_VSC:
eigvals, _ = scipy.linalg.eigh(
numpy.einsum(spin8action.einsum,
spin8action.tensor,
1j * u1_gen.reshape((28, 1)))[:, :, 0])
assert numpy.allclose(eigvals, eigvals.real)
for eigval in eigvals:
charges.append(eigval)
approx_charges = sorted(set(abs(numpy.round(c, 6)) for c in charges) - {0.0})
factor = 1.0 / approx_charges[0]
for n in range(1, 25):
scaled_charges = [numpy.round(factor * n * c, round_to_digits)
for c in approx_charges]
if all(x == int(x) for x in scaled_charges):
return factor * n * u1_gen
raise ValueError('Could not re-scale U(1)-generator.')
def canonicalize_u1s(u1s, tolerance=1e-3):
"""Canonicalizes a collection of up to two u(1) generators."""
if u1s.shape[1] == 0:
return numpy.zeros([28, 0])
if u1s.shape[0] != 28:
raise ValueError(
'Each U(1) generator should be given as a 28-vector.')
num_u1s = u1s.shape[1]
if num_u1s > 2:
raise ValueError('Cannot handle more than two U(1)s')
if num_u1s == 1:
return scale_u1_generator_to_8vsc_integral_charges(u1s[:, 0]).reshape(28, 1)
eigvecs_T, evec_charges = get_simultaneous_eigenbasis(u1s)
a_vecs_eigvals = numpy.array(evec_charges).T
# Otherwise, we have exactly two U(1)s.
# How to reduce the charge-lattice?
zs = numpy.array([x + 1j * y for x, y in a_vecs_eigvals.T])
zs_by_origin_distance = sorted([z for z in zs if abs(z) >= tolerance],
key=abs)
z1 = zs_by_origin_distance[0]
angle = math.atan2(z1.imag, z1.real)
cos_angle = math.cos(angle)
sin_angle = math.sin(angle)
u1a = u1s[:, 0] * cos_angle + u1s[:, 1] * sin_angle
u1b = u1s[:, 0] * sin_angle - u1s[:, 1] * cos_angle
canon_u1s = numpy.stack([
scale_u1_generator_to_8vsc_integral_charges(u1a),
scale_u1_generator_to_8vsc_integral_charges(u1b)], axis=1)
return canon_u1s
def decompose_reductive_lie_algebra(residual_symmetry,
threshold=0.05):
"""Decomposes a residual symmetry into semisimple and u(1) parts.
Args:
residual_symmetry: Residual symmetry as produced by
`get_residual_gauge_symmetry()`.
threshold: Threshold for SVD generalized commutator-eigenvalue to consider
a generator as being part of the non-semisimple subalgebra.
"""
no_symmetry = numpy.zeros([28, 0])
if residual_symmetry.shape[1] == 0:
return no_symmetry, no_symmetry
commutators = numpy.einsum(
'avc,cw->avw',
numpy.einsum('abc,bv->avc', _spin8_fabc, residual_symmetry),
residual_symmetry)
su, ss, svh = scipy.linalg.svd(commutators.reshape(commutators.shape[0], -1))
del svh # Unused.
# We want those commutators that do not go to zero.
derivative_symmetry = su.T[:len(ss)][ss >= threshold].T
# By construction (via SVD), and using orthogonality of our spin(8) basis,
# `derivative_symmetry` already consists of orthogonal spin(8) generators, i.e.
# tr(AB) = 0 for basis vectors A != B.
# The 'complement' consists of u(1) factors that have zero inner product with
# `derivative_symmetry`.
if derivative_symmetry.size:
inner_products_with_input = numpy.einsum('av,aw->vw',
residual_symmetry,
derivative_symmetry)
su, ss, svh = scipy.linalg.svd(inner_products_with_input)
# Zero-pad the vector of 'generalized eigenvalues' to su's size.
ss_ext = numpy.concatenate(
[ss, numpy.zeros([max(0, su.shape[0] - len(ss))])])
u1s = numpy.einsum('av,vn->an',
residual_symmetry,
su.T[ss_ext <= threshold].T)
else: # All residual symmetry is in u(1)-factors.
return no_symmetry, residual_symmetry
# Assert that our U1s are orthogonal.
if u1s.size:
# Check generator orthonormality.
assert numpy.allclose(numpy.einsum('av,aw->vw', u1s, u1s),
numpy.eye(u1s.shape[1]), atol=1e-6)
else:
u1s = no_symmetry
return derivative_symmetry, u1s
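# Sketch of the reductive split (assumes `residual` was produced by
# get_residual_gauge_symmetry()):
#
#   semisimple, u1s = decompose_reductive_lie_algebra(residual)
#   # `semisimple` spans the derived algebra [g, g]; `u1s` spans the
#   # orthogonal abelian complement.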
def find_raw_cartan_subalgebra(spin8_subalgebra_generators, threshold=1e-3):
"""Finds a Cartan subalgebra for an algebra if the form A*so(3) + B*u(1)."""
if spin8_subalgebra_generators.shape[1] == 0:
return numpy.zeros([28, 0])
subalgebra_sprods = numpy.einsum(
'aj,ak->jk', spin8_subalgebra_generators, spin8_subalgebra_generators)
# Check that incoming subalgebra-generators really are reasonably orthonormal
# (up to overall scaling) w.r.t. Cartan-Killing metric.
assert numpy.allclose(subalgebra_sprods,
numpy.eye(spin8_subalgebra_generators.shape[1]))
cartan_generators_found = []
residual_charge_zero_subspace = spin8_subalgebra_generators
while True:
gen = residual_charge_zero_subspace[:, 0]
cartan_generators_found.append(gen)
assert numpy.allclose(gen, gen.real), 'Generator is not real!'
orthogonal_subalgebra = residual_charge_zero_subspace[:, 1:]
if not orthogonal_subalgebra.shape[1]:
return numpy.stack(cartan_generators_found, axis=-1)
gen_ad_action_on_spin8 = numpy.einsum('abc,a->cb', _spin8_fabc, gen)
gen_action_on_orthogonal_subalgebra = numpy.einsum(
'ai,aj->ij',
orthogonal_subalgebra,
numpy.einsum('bc,cj->bj',
gen_ad_action_on_spin8 * 1j,
orthogonal_subalgebra))
assert numpy.allclose(gen_action_on_orthogonal_subalgebra +
gen_action_on_orthogonal_subalgebra.T,
numpy.zeros_like(gen_action_on_orthogonal_subalgebra))
eigvals, eigvecs_T = scipy.linalg.eigh(gen_action_on_orthogonal_subalgebra)
nullspace_gens = []
for eigval, eigvec in zip(eigvals, eigvecs_T.T):
if abs(eigval) <= threshold:
assert numpy.allclose(eigvec, eigvec.real)
nullspace_gens.append(
numpy.einsum('ai,i->a', orthogonal_subalgebra, eigvec.real))
if not len(nullspace_gens):
return numpy.stack(cartan_generators_found, axis=-1)
nullspace = numpy.stack(nullspace_gens, axis=1)
assert numpy.allclose(nullspace, nullspace.real), 'Non-real nullspace'
assert numpy.allclose(numpy.einsum('ai,aj->ij', nullspace, nullspace),
numpy.eye(nullspace.shape[1])), 'Non-Ortho Nullspace'
residual_charge_zero_subspace = nullspace
def weightspace_decompose(generator_action,
cartan_subalgebra_generators,
space,
tolerance=1e-6):
"""Decomposes `space` into subspaces tagged by weight-vectors."""
seq_cartan_generators = list(cartan_subalgebra_generators.T)
def cartan_split(subspace_tagged_by_weight_prefix, num_cartan_generator):
cartan_action = numpy.einsum(
'aIJ,a->IJ',
generator_action,
seq_cartan_generators[num_cartan_generator] * 1j)
result = []
for weight_prefix, subspace in subspace_tagged_by_weight_prefix:
assert numpy.allclose(
numpy.einsum('aJ,aK->JK', subspace.conj(), subspace),
numpy.eye(subspace.shape[1])), (
'Non-orthonormalized subspace:\n' +
repr(numpy.round(numpy.einsum('aJ,aK->JK',
subspace.conj(),
subspace), 3)))
cartan_action_on_subspace = numpy.einsum(
'Jm,Jn->mn', subspace.conj(),
numpy.einsum('JK,Kn->Jn', cartan_action, subspace))
eigvals, eigvecs_T = scipy.linalg.eigh(cartan_action_on_subspace)
eigval_and_rel_eigenspace = aggregate_eigenvectors(eigvals, eigvecs_T)
for eigval, rel_eigenspace in eigval_and_rel_eigenspace:
ext_weight_prefix = (weight_prefix + (eigval,))
result.append((ext_weight_prefix,
numpy.einsum('In,nj->Ij',
subspace,
numpy.stack(rel_eigenspace, axis=-1))))
if num_cartan_generator == len(seq_cartan_generators) - 1:
return result
return cartan_split(result, num_cartan_generator + 1)
return cartan_split([((), space)], 0)
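# Usage sketch: decomposing the adjoint (28-dimensional) representation with
# respect to a Cartan subalgebra (`cartan_gens` and `semisimple_part` are
# assumed names for illustration):
#
#   rootspaces = weightspace_decompose(
#       _spin8_fabc, cartan_gens, semisimple_part)
#   # -> [(weight_tuple, subspace), ...], as consumed by
#   #    get_simple_roots_info() below.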
def get_simple_roots_info(rootspaces, threshold=0.01):
"""Extracts simple roots from weightspace-decomposition of a Lie algebra."""
# Finite-dimensional simple Lie algebras have one-dimensional root spaces.
# We use this to eliminate the Cartan subalgebra at the zero-root.
rank = len(rootspaces[0][0])
null_root = (0.0,) * rank
positive_roots = [root for root, subspace in rootspaces
if subspace.shape[1] == 1 and root > null_root]
def root_length_squared(root):
return sum(x * x for x in root)
def root_distance(root1, root2):
return max(abs(r1 - r2) for r1, r2 in zip(root1, root2))
# If the root is 'clearly too long', drop it rightaway.
# It does not hurt if we allow a large amount of slack,
# as this is just for increased performance.
threshold_root_length_squared = max(
map(root_length_squared, positive_roots)) * (1 + threshold)
sum_roots = []
for root1 in positive_roots:
for root2 in positive_roots:
root12 = tuple(r1 + r2 for r1, r2 in zip(root1, root2))
if root_length_squared(root12) > threshold_root_length_squared:
continue
for sum_root in sum_roots:
if root_distance(sum_root, root12) <= threshold:
break # We already know this sum-root.
else: # Reached end of loop.
sum_roots.append(root12)
simple_roots = [root for root in positive_roots
if not any(root_distance(sum_root, root) < threshold
for sum_root in sum_roots)]
a_simple_roots = numpy.array(simple_roots)
simple_root_sprods = numpy.einsum('rj,rk->jk', a_simple_roots, a_simple_roots)
# We always normalize the length-squared of the longest root to 2.
scaling_factor_squared = 2.0 / max(
simple_root_sprods[n, n] for n in range(simple_root_sprods.shape[0]))
scaling_factor = math.sqrt(scaling_factor_squared)
scaled_root_sprods = simple_root_sprods * scaling_factor_squared
# For spin(3)^N, the roots have to be mutually orthogonal
# with length-squared 2.
  assert numpy.allclose(scaled_root_sprods,
                        2 * numpy.eye(simple_root_sprods.shape[0]))
pos_simple_rootspaces = [(pos_root, scaling_factor * pos_rootspace)
for (pos_root, pos_rootspace) in rootspaces
for simple_root in simple_roots
if tuple(simple_root) == tuple(pos_root)]
canonicalized_cartan_subalgebra_generators = []
for pos_root, pos_rootspace in pos_simple_rootspaces:
# For finite-dimensional Lie algebras, root spaces are one-dimensional.
assert pos_rootspace.shape[1] == 1
l_plus = pos_rootspace[:, 0]
l_minus = l_plus.conj()
cartan_h = -1j * numpy.einsum('abc,a,b->c', _spin8_fabc, l_plus, l_minus)
canonicalized_cartan_subalgebra_generators.append(cartan_h)
# TODO(tfish): Only return what we need, and *not* in a dict.
return dict(simple_root_sprods=simple_root_sprods,
canonicalized_cartan_subalgebra=numpy.stack(
canonicalized_cartan_subalgebra_generators, axis=-1),
scaling_factor_squared=scaling_factor_squared,
pos_simple_rootspaces=pos_simple_rootspaces,
scaled_root_sprods=scaled_root_sprods,
scaled_roots=a_simple_roots * math.sqrt(scaling_factor_squared))
def canonicalize_residual_spin3u1_symmetry(residual_symmetry):
"""Canonicalizes a residual so(3)^M u(1)^N symmetry."""
semisimple_part, raw_u1s = decompose_reductive_lie_algebra(residual_symmetry)
u1s = canonicalize_u1s(raw_u1s)
spin3_cartan_gens_raw = find_raw_cartan_subalgebra(semisimple_part)
return CanonicalizedSymmetry(u1s=u1s,
semisimple_part=semisimple_part,
spin3_cartan_gens=spin3_cartan_gens_raw)
def group_charges_into_spin3u1_irreps(num_spin3s, charge_vecs):
"""Groups observed charges into irreducible representations.
Args:
num_spin3s: Length of the prefix of the charge-vector that belongs to
spin(3) angular momentum operators.
charge_vecs: List of charge-tuple vectors.
Returns:
List [((tuple(highest_spin3_weights) + tuple(u1_charges)), multiplicity),
...] of irreducible-representation descriptions, sorted by descending
combined-charge-vector.
"""
def spin3_weights(highest_weight):
"""Computes a list of spin3 weights for a given irrep highest weight.
E.g.: highest_weight = 1.5 -> [1.5, 0.5, -0.5, -1.5].
Args:
highest_weight: The highest weight (Element of [0, 0.5, 1.0, 1.5, ...]).
Returns: List of weights, in descending order.
"""
w2 = int(round(2 * highest_weight))
return [highest_weight - n for n in range(1 + w2)]
def descendants(cvec):
for spin3_part in itertools.product(
*[spin3_weights(w) for w in cvec[:num_spin3s]]):
yield spin3_part + cvec[num_spin3s:]
charges_todo = collections.Counter(charge_vecs)
irreps = collections.defaultdict(int)
while charges_todo:
cvec, cvec_mult = sorted(charges_todo.items(), reverse=True)[0]
for cvec_desc in descendants(cvec):
charges_todo[cvec_desc] -= cvec_mult
if charges_todo[cvec_desc] == 0:
del charges_todo[cvec_desc]
irreps[cvec] += cvec_mult
return sorted(irreps.items(), reverse=True) # Highest charges first.
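# Worked example: with num_spin3s=1, the charges [(0.5, 1.0), (-0.5, 1.0)]
# form a single spin-1/2 doublet of u(1)-charge 1.0, so
#   group_charges_into_spin3u1_irreps(1, [(0.5, 1.0), (-0.5, 1.0)])
# returns [((0.5, 1.0), 1)].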
def spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks=SPIN8_BRANCHINGS,
simplify=round2):
"""Computes decompositions into so(3)^M x u(1)^N irreducible representations.
Args:
canonicalized_symmetry: A `CanonicalizedSymmetry` object.
decomposition_tasks: Sequence of pairs (spin8action, tasks),
where `tasks` is a sequence of pairs (tag, orthogonalized_subspace).
simplify: The rounding function used to map approximately-integer charges
to integers.
"""
spin3_gens = (canonicalized_symmetry.spin3_cartan_gens.T
if (canonicalized_symmetry.spin3_cartan_gens is not None
and len(canonicalized_symmetry.spin3_cartan_gens)) else [])
u1_gens = (canonicalized_symmetry.u1s.T
if (canonicalized_symmetry.u1s is not None
and len(canonicalized_symmetry.u1s)) else [])
num_spin3s = len(spin3_gens)
num_u1s = len(u1_gens)
def grouped(charges):
# Spin(3) angular momentum charges need to be half-integral.
# For U(1) generators, we are not requiring this.
assert all(round2(2 * c) == int(round2(2 * c))
for charge_vec in charges
for c in charge_vec[:num_spin3s])
return group_charges_into_spin3u1_irreps(
num_spin3s,
[tuple(map(simplify, charge_vec)) for charge_vec in charges])
if num_spin3s:
rootspaces = weightspace_decompose(
_spin8_fabc,
spin3_gens.T,
canonicalized_symmetry.semisimple_part)
sroot_info = get_simple_roots_info(rootspaces)
angular_momentum_u1s = list(sroot_info['canonicalized_cartan_subalgebra'].T)
else:
angular_momentum_u1s = []
list_commuting_gens = (
[g for g in [angular_momentum_u1s, u1_gens] if len(g)])
commuting_gens = (numpy.concatenate(list_commuting_gens).T
if list_commuting_gens else numpy.zeros([28, 0]))
ret = []
for spin8action, tasks in decomposition_tasks:
ret.append([])
for task_tag, space_to_decompose in tasks:
_, charges = get_simultaneous_eigenbasis(
commuting_gens,
gen_action_einsum=spin8action.einsum,
gen_action_tensor=spin8action.tensor,
initial_space=space_to_decompose)
ret[-1].append((task_tag, grouped(charges)))
return ret
def spin3u1_branching_and_spectra(canonicalized_symmetry,
decomposition_tasks=()):
"""Computes so(3)^M x u(1)^N spectra."""
vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
spectra = spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks)
return vsc_ad_branching, spectra
def spin3u1_physics(
canonicalized_symmetry,
mass_tagged_eigenspaces_gravitinos=(),
mass_tagged_eigenspaces_fermions=(),
mass_tagged_eigenspaces_scalars=(),
# Note that we see cases where we have very uneven parity-mixtures.
parity_tolerance=1e-7):
"""Computes so(3)^M x u(1)^N spectra."""
vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
decomposition_tasks = []
# Gravitino tasks.
gravitino_tasks = []
for gravitino_mass, basis in mass_tagged_eigenspaces_gravitinos:
subspace = numpy.array(basis).T
task_tag = ('gravitinos', subspace.shape, gravitino_mass)
gravitino_tasks.append((task_tag, subspace))
decomposition_tasks.append(
(SPIN8_ACTION_8V, gravitino_tasks))
# Fermion tasks.
fermion_tasks = []
for fermion_mass, basis in mass_tagged_eigenspaces_fermions:
subspace = numpy.array(basis).T
task_tag = ('fermions', subspace.shape, fermion_mass)
fermion_tasks.append((task_tag, subspace))
decomposition_tasks.append(
(SPIN8_ACTION_FERMIONS, fermion_tasks))
# Scalar tasks.
scalar_tasks = []
# For scalars, we try to split off mass-eigenstates that are
# 35s-only or 35c-only.
p_op = numpy.eye(70)
p_op[35:, 35:] *= -1
for scalar_mass, basis in mass_tagged_eigenspaces_scalars:
a_basis = numpy.array(basis)
p_op_on_basis = numpy.einsum('jn,nm,km->jk', a_basis.conj(), p_op, a_basis)
assert numpy.allclose(p_op_on_basis, p_op_on_basis.real)
assert numpy.allclose(p_op_on_basis, p_op_on_basis.T)
p_op_eigvals, p_op_eigvecs_T = numpy.linalg.eigh(p_op_on_basis)
p_op_eigvals_re = p_op_eigvals.real
assert numpy.allclose(p_op_eigvals, p_op_eigvals_re)
# We have to lift the p_op_eigvecs_T to a_basis.
subspace_eigvecs = numpy.einsum('vn,vV->Vn', p_op_eigvecs_T, a_basis)
eigval_eigvecs = aggregate_eigenvectors(p_op_eigvals_re, subspace_eigvecs,
tolerance=1e-4)
# subspaces_35s and subspaces_35c each have <=1 entries.
subspaces_35s = [eigvecs for eigval, eigvecs in eigval_eigvecs
if eigval > 1 - parity_tolerance]
subspaces_35c = [eigvecs for eigval, eigvecs in eigval_eigvecs
if eigval < -1 + parity_tolerance]
merged_subspaces_other = [
eigvec for eigval, eigvecs in eigval_eigvecs
for eigvec in eigvecs
if -1 + parity_tolerance <= eigval <= 1 - parity_tolerance]
for subspace in subspaces_35s:
a_subspace = numpy.array(subspace).T
task_tag = ('scalars', a_subspace.shape, scalar_mass, 's')
scalar_tasks.append((task_tag, a_subspace))
for subspace in subspaces_35c:
a_subspace = numpy.array(subspace).T
task_tag = ('scalars', a_subspace.shape, scalar_mass, 'c')
scalar_tasks.append((task_tag, a_subspace))
# "Mixture" states. While we do get them in terms of parity-eigenstates,
    # they come with 'weird' eigenvalues such as -1/3. Here, we just merge them all back
# together into one space, i.e. forget about resolving the spectrum.
# Why? Otherwise, we may see in the report
# "0.000m{1}, 0.000m{1}, 0.000m{1}, ...", which is not overly informative.
a_subspace = numpy.array(merged_subspaces_other).T
if len(merged_subspaces_other):
task_tag = ('scalars', a_subspace.shape, scalar_mass, 'm')
scalar_tasks.append((task_tag, a_subspace))
decomposition_tasks.append(
(SPIN8_ACTION_SCALARS, scalar_tasks))
spectra = spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks)
return vsc_ad_branching, spectra
| apache-2.0 | 8,383,226,262,256,209,000 | 42.08508 | 81 | 0.638372 | false |
llvm-mirror/llvm | utils/llvm-locstats/llvm-locstats.py | 6 | 7999 | #!/usr/bin/env python
#
# This is a tool that acts as a debug location coverage calculator.
# It parses the llvm-dwarfdump --statistics output and reports it
# in a more human readable way.
#
from __future__ import print_function
import argparse
import os
import sys
from json import loads
from math import ceil
from subprocess import Popen, PIPE
def coverage_buckets():
yield '0%'
yield '1-9%'
for start in range(10, 91, 10):
yield '{0}-{1}%'.format(start, start + 9)
yield '100%'
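# Produces the fixed bucket sequence:
#   '0%', '1-9%', '10-19%', ..., '90-99%', '100%'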
def locstats_output(
variables_total,
variables_total_locstats,
variables_with_loc,
scope_bytes_covered,
scope_bytes_from_first_def,
variables_coverage_map
):
  # The division must happen inside ceil(); otherwise ceil() acts on a whole
  # number and the percentage silently truncates instead of rounding up.
  pc_ranges_covered = int(ceil(scope_bytes_covered * 100.0
                      / scope_bytes_from_first_def))
variables_coverage_per_map = {}
for cov_bucket in coverage_buckets():
    variables_coverage_per_map[cov_bucket] = \
      int(ceil(variables_coverage_map[cov_bucket] * 100.0
               / variables_total_locstats))
print (' =================================================')
print (' Debug Location Statistics ')
print (' =================================================')
print (' cov% samples percentage(~) ')
print (' -------------------------------------------------')
for cov_bucket in coverage_buckets():
print (' {0:6} {1:8d} {2:3d}%'. \
format(cov_bucket, variables_coverage_map[cov_bucket], \
variables_coverage_per_map[cov_bucket]))
print (' =================================================')
print (' -the number of debug variables processed: ' \
+ str(variables_total_locstats))
print (' -PC ranges covered: ' + str(pc_ranges_covered) + '%')
# Only if we are processing all the variables output the total
# availability.
if variables_total and variables_with_loc:
    total_availability = int(ceil(variables_with_loc * 100.0
                             / variables_total))
print (' -------------------------------------------------')
print (' -total availability: ' + str(total_availability) + '%')
print (' =================================================')
def parse_program_args(parser):
parser.add_argument('-only-variables', action='store_true',
default=False,
help='calculate the location statistics only for '
'local variables'
)
parser.add_argument('-only-formal-parameters', action='store_true',
default=False,
help='calculate the location statistics only for '
'formal parameters'
)
parser.add_argument('-ignore-debug-entry-values', action='store_true',
default=False,
help='ignore the location statistics on locations with '
'entry values'
)
parser.add_argument('file_name', type=str, help='file to process')
return parser.parse_args()
def Main():
parser = argparse.ArgumentParser()
results = parse_program_args(parser)
if len(sys.argv) < 2:
print ('error: Too few arguments.')
parser.print_help()
sys.exit(1)
if results.only_variables and results.only_formal_parameters:
print ('error: Please use just one only* option.')
parser.print_help()
sys.exit(1)
# These will be different due to different options enabled.
variables_total = None
variables_total_locstats = None
variables_with_loc = None
variables_scope_bytes_covered = None
variables_scope_bytes_from_first_def = None
variables_scope_bytes_entry_values = None
variables_coverage_map = {}
binary = results.file_name
# Get the directory of the LLVM tools.
llvm_dwarfdump_cmd = os.path.join(os.path.dirname(__file__), \
"llvm-dwarfdump")
# The statistics llvm-dwarfdump option.
llvm_dwarfdump_stats_opt = "--statistics"
subproc = Popen([llvm_dwarfdump_cmd, llvm_dwarfdump_stats_opt, binary], \
stdin=PIPE, stdout=PIPE, stderr=PIPE, \
universal_newlines = True)
cmd_stdout, cmd_stderr = subproc.communicate()
# Get the JSON and parse it.
json_parsed = None
try:
json_parsed = loads(cmd_stdout)
  except ValueError:
print ('error: No valid llvm-dwarfdump statistics found.')
sys.exit(1)
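  # The JSON key strings used below must match the llvm-dwarfdump --statistics
  # output verbatim (presumably including the 'procesed' spelling).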
if results.only_variables:
# Read the JSON only for local variables.
variables_total_locstats = \
json_parsed['total vars procesed by location statistics']
variables_scope_bytes_covered = \
json_parsed['vars scope bytes covered']
variables_scope_bytes_from_first_def = \
json_parsed['vars scope bytes total']
if not results.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "vars with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['vars entry value scope bytes covered']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"vars (excluding the debug entry values) " \
"with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
elif results.only_formal_parameters:
# Read the JSON only for formal parameters.
variables_total_locstats = \
json_parsed['total params procesed by location statistics']
variables_scope_bytes_covered = \
json_parsed['formal params scope bytes covered']
variables_scope_bytes_from_first_def = \
json_parsed['formal params scope bytes total']
if not results.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "params with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['formal params entry value scope bytes covered']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"params (excluding the debug entry values) " \
"with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
# Read the JSON for both local variables and formal parameters.
variables_total = \
json_parsed['source variables']
variables_with_loc = json_parsed['variables with location']
variables_total_locstats = \
json_parsed['total variables procesed by location statistics']
variables_scope_bytes_covered = \
json_parsed['scope bytes covered']
variables_scope_bytes_from_first_def = \
json_parsed['scope bytes total']
if not results.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "variables with {} of its scope covered". \
format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['entry value scope bytes covered']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = "variables (excluding the debug entry values) " \
"with {} of its scope covered". format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
# Pretty print collected info.
locstats_output(
variables_total,
variables_total_locstats,
variables_with_loc,
variables_scope_bytes_covered,
variables_scope_bytes_from_first_def,
variables_coverage_map
)
if __name__ == '__main__':
Main()
sys.exit(0)
| apache-2.0 | -7,798,051,178,738,378,000 | 37.272727 | 79 | 0.627578 | false |
hattwj/rainmaker | rainmaker/tests/unit/db/main_test.py | 1 | 2268 | from rainmaker.tests import test_helper, factory_helper
from rainmaker.main import Application
from rainmaker.db.main import init_db, HostFile, SyncFile, Sync, \
Host, Resolution, Download
from rainmaker.db import main
fh = factory_helper
def test_db_init():
init_db()
def test_sync_file_version_init():
init_db()
assert SyncFile(version=5).version == 5
def test_rain_base_before_changes():
session = init_db()
sync = factory_helper.Sync()
sync_file = factory_helper.SyncFile(sync, 1)
    assert sync_file.before_changes()['sync_id'] is None
def test_sqlalchemy_property_assignment():
sf = HostFile()
sf.vers = [{'version': 0, 'file_size':5}]
assert sf.vers[0].file_size == 5
sf = SyncFile()
#print('Doing Set')
sf.vers = [{'version': 0, 'file_size':5}]
#print('Did Set')
assert sf.vers[0].file_size == 5
def test_sync_delete_cascades():
session = init_db()
sync = factory_helper.Sync()
sync_file = factory_helper.SyncFile(sync, 1, fake=True,
file_size=98765 ,is_dir=False)
host = factory_helper.Host(sync, 1)
host_file = factory_helper.HostFile(host, 1, is_dir=False)
session.add(sync)
session.commit()
sync = session.query(Sync).first()
assert len(sync.hosts) > 0
assert len(session.query(Host).all()) > 0
assert len(session.query(SyncFile).all()) > 0
assert len(session.query(HostFile).all()) > 0
session.delete(sync)
assert len(session.query(Sync).all()) == 0
assert len(session.query(Host).all()) == 0
assert len(session.query(SyncFile).all()) == 0
assert len(session.query(HostFile).all()) == 0
def test_resolution():
db = init_db()
r = Resolution()
sync = fh.SyncRand()
host = fh.HostRand(sync)
r.sync = sync
r.host = host
r.host_file = host.host_files[0]
r.sync_file = sync.sync_files[0]
r.status = Resolution.THEIRS_CHANGED
r.state = Resolution.DELETED
db.add(r)
db.commit()
return db
def test_lazy_loading():
db = test_resolution()
r = db.query(Resolution).first()
d = Download(rel_path="test", sync_id=r.sync_id)
r.download = d
db.add(r)
db.commit()
r = db.query(Resolution).first()
assert r.download is not None
| gpl-3.0 | -435,736,149,443,689,500 | 28.076923 | 66 | 0.637566 | false |
rvmoura96/projeto-almoxarifado | myvenv/Lib/site-packages/django_filters/rest_framework/filterset.py | 1 | 1483 |
from __future__ import absolute_import
from copy import deepcopy
from django import forms
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_filters import filterset
from .. import compat, utils
from .filters import BooleanFilter, IsoDateTimeFilter
FILTER_FOR_DBFIELD_DEFAULTS = deepcopy(filterset.FILTER_FOR_DBFIELD_DEFAULTS)
FILTER_FOR_DBFIELD_DEFAULTS.update({
models.DateTimeField: {'filter_class': IsoDateTimeFilter},
models.BooleanField: {'filter_class': BooleanFilter},
})
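# DRF-oriented overrides: IsoDateTimeFilter accepts ISO 8601 timestamps and
# BooleanFilter uses 'true'/'false' parsing, which suit JSON API clients
# better than the stock django-filter defaults.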
class FilterSet(filterset.FilterSet):
FILTER_DEFAULTS = FILTER_FOR_DBFIELD_DEFAULTS
@property
def form(self):
form = super(FilterSet, self).form
if compat.is_crispy():
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit
layout_components = list(form.fields.keys()) + [
Submit('', _('Submit'), css_class='btn-default'),
]
helper = FormHelper()
helper.form_method = 'GET'
helper.template_pack = 'bootstrap3'
helper.layout = Layout(*layout_components)
form.helper = helper
return form
@property
def qs(self):
from rest_framework.exceptions import ValidationError
try:
return super(FilterSet, self).qs
except forms.ValidationError as e:
raise ValidationError(utils.raw_validation(e))
| mit | -1,568,155,611,122,657,300 | 27.519231 | 77 | 0.662846 | false |
trueship/oauth2app | tests/testsite/apps/oauth2/urls.py | 1 | 1449 | #-*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from oauth2app.token import TokenGenerator
from oauth2app.consts import MAC
urlpatterns = patterns('',
(r'^missing_redirect_uri/?$', 'testsite.apps.oauth2.views.missing_redirect_uri'),
(r'^authorize_not_refreshable/?$', 'testsite.apps.oauth2.views.authorize_not_refreshable'),
(r'^authorize_mac/?$', 'testsite.apps.oauth2.views.authorize_mac'),
(r'^authorize_first_name/?$', 'testsite.apps.oauth2.views.authorize_first_name'),
    (r'^authorize_last_name/?$', 'testsite.apps.oauth2.views.authorize_last_name'),
(r'^authorize_first_and_last_name/?$', 'testsite.apps.oauth2.views.authorize_first_and_last_name'),
(r'^authorize_no_scope/?$', 'testsite.apps.oauth2.views.authorize_no_scope'),
(r'^authorize_code/?$', 'testsite.apps.oauth2.views.authorize_code'),
(r'^authorize_token/?$', 'testsite.apps.oauth2.views.authorize_token'),
(r'^authorize_token_mac/?$', 'testsite.apps.oauth2.views.authorize_token_mac'),
(r'^authorize_code_and_token/?$', 'testsite.apps.oauth2.views.authorize_code_and_token'),
(r'^token/?$', 'oauth2app.token.handler'),
(r'^token_mac/?$', TokenGenerator(authentication_method=MAC))
)
| mit | 2,713,320,258,166,799,000 | 68 | 109 | 0.590752 | false |
tom-f-oconnell/multi_tracker | multi_tracker_analysis/data_slicing.py | 1 | 3233 | import numpy as np
def get_keys_in_framerange(pd, framerange):
    return np.unique(pd.loc[framerange[0]:framerange[-1]].objid)
def get_frames_for_key(pd, key):
return pd[pd.objid==key].frames.values
def get_data_in_framerange(pd, framerange):
# pd_subset
return pd.ix[framerange[0]:framerange[-1]]
def get_data_in_epoch_timerange(pd, timerange):
# pd_subset
return pd[(pd.time_epoch>timerange[0]) & (pd.time_epoch<timerange[1])]
def get_nframes_per_key(pd):
first_key = np.min(pd.objid)
last_key = np.max(pd.objid)
bins = np.arange(first_key, last_key+2, dtype=float)
bins -= 0.5
h, b = np.histogram(pd.objid, bins)
keys = np.arange(first_key, last_key+1, dtype=int)
return keys, h
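# Note: keys and h are parallel arrays: h[i] counts the frames in which
# object keys[i] appears.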
def get_nkeys_per_frame(pd):
first_key = np.min(pd.frames)
last_key = np.max(pd.frames)
    bins = np.arange(first_key, last_key+2, dtype=float)
bins -= 0.5
h, b = np.histogram(pd.frames, bins)
# can use pd.frames.groupby(pd.frames).agg('count')
return h
def calc_frames_with_object_in_circular_region(pd, center, radius, region_name='region'):
'''
center - list (x,y) units should match units of position_x and position_y
'''
x = pd.position_x
y = pd.position_y
r0 = (center[0]-x)**2 + (center[1]-y)**2
    indices = np.where( r0 <= radius**2 )
pd[region_name] = np.zeros_like(pd.position_x)
pd[region_name].iloc[indices] = 1
return pd
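# Usage sketch (units are whatever position_x/position_y use):
#   pd = calc_frames_with_object_in_circular_region(pd, (0.0, 0.0), 10.0,
#                                                   region_name='arena')
# adds an 'arena' column that is 1 for rows inside the circle and 0 elsewhere.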
def calc_frames_with_object_NOT_in_circular_region(pd, center, radius, region_name='region'):
'''
center - list (x,y) units should match units of position_x and position_y
'''
x = pd.position_x
y = pd.position_y
r0 = (center[0]-x)**2 + (center[1]-y)**2
    indices = np.where( r0 > radius**2 )
pd[region_name] = np.zeros_like(pd.position_x)
pd[region_name].iloc[indices] = 1
return pd
def remove_objects_that_enter_area_outside_circular_region(pd, center, radius, region_name='outofbounds'):
pd = calc_frames_with_object_NOT_in_circular_region(pd, center, radius, region_name=region_name)
outofbounds = np.unique(pd[pd[region_name]==1].objid.values)
keys_ok = [key for key in pd.objid if key not in outofbounds]
indices_where_object_acceptable = pd.objid.isin(keys_ok)
culled_pd = pd[indices_where_object_acceptable]
return culled_pd
def calc_frames_with_object_in_rectangular_region(pd, x_range, y_range, z_range=None, region_name='region'):
'''
center - list (x,y) units should match units of position_x and position_y
'''
if z_range is None:
x = pd.position_x
y = pd.position_y
indices = np.where( (x>x_range[0]) & (x<x_range[-1]) & (y>y_range[0]) & (y<y_range[-1]) )
else:
x = pd.position_x
y = pd.position_y
z = pd.position_z
indices = np.where( (x>x_range[0]) & (x<x_range[-1]) & (y>y_range[0]) & (y<y_range[-1]) & (z>z_range[0]) & (z<z_range[-1]) )
pd[region_name] = np.zeros_like(pd.position_x)
pd[region_name].iloc[indices] = 1
return pd
def get_pd_subset_from_keys(pd, keys):
pd_subset = pd.query('objid in @keys')
return pd_subset
| mit | -5,570,509,066,048,745,000 | 31.33 | 132 | 0.615218 | false |
globocom/database-as-a-service | dbaas/account/migrations/0010_auto__add_field_team_token.py | 1 | 8543 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Team.token'
db.add_column(u'account_team', 'token',
self.gf('django.db.models.fields.CharField')(max_length=406, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Team.token'
db.delete_column(u'account_team', 'token')
models = {
u'account.organization': {
'Meta': {'object_name': 'Organization'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.roleenvironment': {
'Meta': {'object_name': 'RoleEnvironment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'roles'", 'blank': 'True', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'role_environment'", 'unique': 'True', 'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['account'] | bsd-3-clause | 7,315,269,892,506,538,000 | 75.285714 | 195 | 0.557415 | false |
Psirus/altay | altai/gui/driver_selection_group.py | 1 | 3169 | """ Group from which to select manufacturer and model """
import PySide2.QtCore as QtCore
import PySide2.QtWidgets as QtWidgets
from . import config
from ..lib.driver import Driver
class DriverSelectionGroup(QtWidgets.QGroupBox):
""" Group from which to select manufacturer and model """
driver_changed = QtCore.Signal(Driver)
def __init__(self):
# Driver selection setup
QtWidgets.QGroupBox.__init__(self, "Driver Selection")
driver_selection_form = QtWidgets.QFormLayout(self)
driver_selection_form.setFieldGrowthPolicy(
QtWidgets.QFormLayout.FieldsStayAtSizeHint)
driver_manuf_label = QtWidgets.QLabel(self)
driver_manuf_label.setText("Manufacturer")
self.driver_manuf_box = QtWidgets.QComboBox(self)
self.driver_manuf_box.activated.connect(self.set_manufacturer)
driver_model_label = QtWidgets.QLabel(self)
driver_model_label.setText("Model")
self.driver_model_box = QtWidgets.QComboBox(self)
self.driver_model_box.activated.connect(self.change_driver)
for manufacturer in config.driver_db.manufacturers:
self.driver_manuf_box.addItem(manufacturer)
self.current_manuf = self.driver_manuf_box.currentText()
for driver in config.driver_db:
if driver.manufacturer == self.current_manuf:
self.driver_model_box.addItem(driver.model)
self.current_model = self.driver_model_box.currentText()
for driver in config.driver_db:
if ((self.current_model == driver.model) and
(self.current_manuf == driver.manufacturer)):
self.current_driver = driver
driver_selection_form.addRow(driver_manuf_label, self.driver_manuf_box)
driver_selection_form.addRow(driver_model_label, self.driver_model_box)
self.setLayout(driver_selection_form)
def update_drivers(self, manufacturers):
""" When manufacturer is added to DB, update the comboboxes in this
group """
self.driver_manuf_box.clear()
for manufacturer in manufacturers:
self.driver_manuf_box.addItem(manufacturer)
self.set_manufacturer(0)
def set_manufacturer(self, index):
""" Change manufacturer, repopulate model box and emit driver change
signal"""
self.current_manuf = self.driver_manuf_box.itemText(index)
self.driver_model_box.clear()
for driver in config.driver_db:
if driver.manufacturer == self.current_manuf:
self.driver_model_box.addItem(driver.model)
self.change_driver()
def change_driver(self):
""" A new driver is selected; emit signal containing the currently
selected driver """
self.current_manuf = self.driver_manuf_box.currentText()
self.current_model = self.driver_model_box.currentText()
for driver in config.driver_db:
if ((self.current_model == driver.model) and
(self.current_manuf == driver.manufacturer)):
self.current_driver = driver
self.driver_changed.emit(self.current_driver)
| bsd-3-clause | 4,015,538,848,394,772,500 | 42.410959 | 79 | 0.662354 | false |
huggingface/transformers | src/transformers/models/t5/tokenization_t5_fast.py | 1 | 8622 | # coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_t5 import T5Tokenizer
else:
T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" T5 tokenizer (backed by HuggingFace's `tokenizers` library). Based on `Unigram
<https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (:obj:`int`, `optional`, defaults to 100):
Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
like in T5 preprocessing see `here
<https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__).
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = T5Tokenizer
prefix_tokens: List[int] = []
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=100,
additional_special_tokens=None,
**kwargs
):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. "
"In this case the additional_special_tokens must include the extra_ids tokens"
)
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
extra_ids=extra_ids,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
self._extra_ids = extra_ids
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: ``X </s>``
- pair of sequences: ``A </s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
| apache-2.0 | 2,695,773,239,925,617,700 | 41.683168 | 164 | 0.6299 | false |
Elizaveta239/PyDev.Debugger | tests_python/test_bytecode_modification.py | 1 | 23477 | import dis
import sys
import unittest
from io import StringIO
import pytest
from _pydevd_frame_eval.pydevd_modify_bytecode import insert_code
from opcode import EXTENDED_ARG
TRACE_MESSAGE = "Trace called"
def tracing():
print(TRACE_MESSAGE)
def call_tracing():
return tracing()
def bar(a, b):
return a + b
IS_PY36 = sys.version_info[0] == 3 and sys.version_info[1] == 6
@pytest.mark.skipif(not IS_PY36, reason='Test requires Python 3.6')
class TestInsertCode(unittest.TestCase):
lines_separator = "---Line tested---"
def check_insert_every_line(self, func_to_modify, func_to_insert, number_of_lines):
first_line = func_to_modify.__code__.co_firstlineno + 1
last_line = first_line + number_of_lines
for i in range(first_line, last_line):
self.check_insert_to_line_with_exec(func_to_modify, func_to_insert, i)
print(self.lines_separator)
def check_insert_to_line_with_exec(self, func_to_modify, func_to_insert, line_number):
code_orig = func_to_modify.__code__
code_to_insert = func_to_insert.__code__
success, result = insert_code(code_orig, code_to_insert, line_number)
exec(result)
output = sys.stdout.getvalue().strip().split(self.lines_separator)[-1]
self.assertTrue(TRACE_MESSAGE in output)
def check_insert_to_line_by_symbols(self, func_to_modify, func_to_insert, line_number, code_for_check):
code_orig = func_to_modify.__code__
code_to_insert = func_to_insert.__code__
success, result = insert_code(code_orig, code_to_insert, line_number)
self.compare_bytes_sequence(list(result.co_code), list(code_for_check.co_code), len(code_to_insert.co_code))
def compare_bytes_sequence(self, code1, code2, inserted_code_size):
"""
        Compare code after modification with the real code.
        Since we add a POP_JUMP_IF_TRUE instruction, we can't compare the
        modified code with the real code directly. That's why we allow some
        inaccuracies during code comparison.
:param code1: result code after modification
:param code2: a real code for checking
:param inserted_code_size: size of inserted code
"""
seq1 = [(offset, op, arg) for offset, op, arg in dis._unpack_opargs(code1)]
seq2 = [(offset, op, arg) for offset, op, arg in dis._unpack_opargs(code2)]
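        # Both sequences hold (offset, opcode, argument) triples as produced
        # by dis._unpack_opargs.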
assert len(seq1) == len(seq2), "Bytes sequences have different lengths %s != %s" % (len(seq1), len(seq2))
for i in range(len(seq1)):
of, op1, arg1 = seq1[i]
_, op2, arg2 = seq2[i]
if op1 != op2:
if op1 == 115 and op2 == 1:
# it's ok, because we added POP_JUMP_IF_TRUE manually, but it's POP_TOP in the real code
# inserted code - 2 (removed return instruction) - real code inserted
# Jump should be done to the beginning of inserted fragment
self.assertEqual(arg1, of - (inserted_code_size - 2))
continue
elif op1 == EXTENDED_ARG and op2 == 12:
# we added a real UNARY_NOT to balance EXTENDED_ARG added by new jump instruction
# i.e. inserted code size was increased as well
inserted_code_size += 2
continue
self.assertEqual(op1, op2, "Different operators at offset {}".format(of))
if arg1 != arg2:
if op1 in (100, 101, 106, 116):
                    # Indexes of variable names and consts may differ after we
                    # insert new ones; that's ok.
continue
else:
                    self.assertEqual(arg1, arg2, "Different arguments at offset {}".format(of))
def test_line(self):
def foo():
global global_loaded
global_loaded()
def method():
a = 10
b = 20
c = 20
success, result = insert_code(method.__code__, foo.__code__, method.__code__.co_firstlineno + 1)
assert success
assert list(result.co_lnotab) == [10, 1, 4, 1, 4, 1]
success, result = insert_code(method.__code__, foo.__code__, method.__code__.co_firstlineno + 2)
assert success
assert list(result.co_lnotab) == [0, 1, 14, 1, 4, 1]
success, result = insert_code(method.__code__, foo.__code__, method.__code__.co_firstlineno + 3)
assert success
assert list(result.co_lnotab) == [0, 1, 4, 1, 14, 1]
def test_assignment(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
a = 1
b = 2
c = 3
self.check_insert_every_line(original, tracing, 3)
finally:
sys.stdout = self.original_stdout
def test_for_loop(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
n = 3
sum = 0
for i in range(n):
sum += i
return sum
self.check_insert_every_line(original, tracing, 5)
finally:
sys.stdout = self.original_stdout
def test_if(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
if True:
a = 1
else:
a = 0
print(a)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 2)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 5)
finally:
sys.stdout = self.original_stdout
def test_else(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
if False:
a = 1
else:
a = 0
print(a)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 4)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 5)
finally:
sys.stdout = self.original_stdout
def test_for_else(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
sum = 0
for i in range(3):
sum += i
else:
print(sum)
def check_line_1():
tracing()
sum = 0
for i in range(3):
sum += i
else:
print(sum)
def check_line_3():
sum = 0
for i in range(3):
tracing()
sum += i
else:
print(sum)
def check_line_5():
sum = 0
for i in range(3):
sum += i
else:
tracing()
print(sum)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 1)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 3)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 5)
sys.stdout = self.original_stdout
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 1,
check_line_1.__code__)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 3,
check_line_3.__code__)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 5,
check_line_5.__code__)
finally:
sys.stdout = self.original_stdout
def test_elif(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
a = 5
b = 0
if a < 0:
print("a < 0")
elif a < 3:
print("a < 3")
else:
print("a >= 3")
b = a
return b
def check_line_1():
tracing()
a = 5
b = 0
if a < 0:
print("a < 0")
elif a < 3:
print("a < 3")
else:
print("a >= 3")
b = a
return b
def check_line_8():
a = 5
b = 0
if a < 0:
print("a < 0")
elif a < 3:
print("a < 3")
else:
tracing()
print("a >= 3")
b = a
return b
def check_line_9():
a = 5
b = 0
if a < 0:
print("a < 0")
elif a < 3:
print("a < 3")
else:
print("a >= 3")
tracing()
b = a
return b
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 1)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 2)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 8)
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 9)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 1,
check_line_1.__code__)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 8,
check_line_8.__code__)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 9,
check_line_9.__code__)
finally:
sys.stdout = self.original_stdout
def test_call_other_function(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def original():
a = 1
b = 3
c = bar(a, b)
return c
def check_line_3():
a = 1
b = 3
tracing()
c = bar(a, b)
return c
def check_line_4():
a = 1
b = 3
c = bar(a, b)
tracing()
return c
self.check_insert_every_line(original, tracing, 4)
sys.stdout = self.original_stdout
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 3,
check_line_3.__code__)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 4,
check_line_4.__code__)
finally:
sys.stdout = self.original_stdout
def test_class_method(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
class A(object):
@staticmethod
def foo():
print("i'm in foo")
@staticmethod
def check_line_2():
tracing()
print("i'm in foo")
original = A.foo
self.check_insert_to_line_with_exec(original, tracing, original.__code__.co_firstlineno + 2)
self.check_insert_to_line_by_symbols(original, call_tracing, original.__code__.co_firstlineno + 2,
A.check_line_2.__code__)
finally:
sys.stdout = self.original_stdout
def test_offset_overflow(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def foo():
a = 1
b = 2 # breakpoint
c = 3
a1 = 1 if a > 1 else 2
a2 = 1 if a > 1 else 2
a3 = 1 if a > 1 else 2
a4 = 1 if a > 1 else 2
a5 = 1 if a > 1 else 2
a6 = 1 if a > 1 else 2
a7 = 1 if a > 1 else 2
a8 = 1 if a > 1 else 2
a9 = 1 if a > 1 else 2
a10 = 1 if a > 1 else 2
a11 = 1 if a > 1 else 2
a12 = 1 if a > 1 else 2
a13 = 1 if a > 1 else 2
for i in range(1):
if a > 0:
print("111")
# a = 1
else:
print("222")
return b
def check_line_2():
a = 1
tracing()
b = 2
c = 3
a1 = 1 if a > 1 else 2
a2 = 1 if a > 1 else 2
a3 = 1 if a > 1 else 2
a4 = 1 if a > 1 else 2
a5 = 1 if a > 1 else 2
a6 = 1 if a > 1 else 2
a7 = 1 if a > 1 else 2
a8 = 1 if a > 1 else 2
a9 = 1 if a > 1 else 2
a10 = 1 if a > 1 else 2
a11 = 1 if a > 1 else 2
a12 = 1 if a > 1 else 2
a13 = 1 if a > 1 else 2
for i in range(1):
if a > 0:
print("111")
# a = 1
else:
print("222")
return b
self.check_insert_to_line_with_exec(foo, tracing, foo.__code__.co_firstlineno + 2)
self.check_insert_to_line_by_symbols(foo, call_tracing, foo.__code__.co_firstlineno + 2,
check_line_2.__code__)
finally:
sys.stdout = self.original_stdout
def test_long_lines(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def foo():
a = 1
b = 1 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23
c = 1 if b > 1 else 2 if b > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23
d = 1 if c > 1 else 2 if c > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23
e = d + 1
return e
def check_line_2():
a = 1
tracing()
b = 1 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23
c = 1 if b > 1 else 2 if b > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23
d = 1 if c > 1 else 2 if c > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23 if a > 1 else 2 if a > 0 else 3 if a > 4 else 23
e = d + 1
return e
self.check_insert_to_line_with_exec(foo, tracing, foo.__code__.co_firstlineno + 2)
sys.stdout = self.original_stdout
self.check_insert_to_line_by_symbols(foo, call_tracing, foo.__code__.co_firstlineno + 2,
check_line_2.__code__)
finally:
sys.stdout = self.original_stdout
def test_many_names(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
from tests_python.resources._bytecode_many_names_example import foo
self.check_insert_to_line_with_exec(foo, tracing, foo.__code__.co_firstlineno + 2)
finally:
sys.stdout = self.original_stdout
def test_extended_arg_overflow(self):
from tests_python.resources._bytecode_overflow_example import Dummy, DummyTracing
self.check_insert_to_line_by_symbols(Dummy.fun, call_tracing, Dummy.fun.__code__.co_firstlineno + 3,
DummyTracing.fun.__code__)
def test_double_extended_arg(self):
self.original_stdout = sys.stdout
sys.stdout = StringIO()
try:
def foo():
a = 1
b = 2
if b > 0:
d = a + b
d += 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
a = a + 1
return a
def foo_check():
a = 1
b = 2
tracing()
if b > 0:
d = a + b
d += 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
a = a + 1
return a
def foo_check_2():
a = 1
b = 2
if b > 0:
d = a + b
d += 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
not tracing() # add 'not' to balance EXTENDED_ARG when jumping
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
b = b - 1 if a > 0 else b + 1
a = a + 1
return a
self.check_insert_to_line_with_exec(foo, tracing, foo.__code__.co_firstlineno + 2)
sys.stdout = self.original_stdout
self.check_insert_to_line_by_symbols(foo, call_tracing, foo.__code__.co_firstlineno + 3,
foo_check.__code__)
self.check_insert_to_line_by_symbols(foo, call_tracing, foo.__code__.co_firstlineno + 21,
foo_check_2.__code__)
finally:
sys.stdout = self.original_stdout
| epl-1.0 | 5,869,620,345,414,699,000 | 39.758681 | 511 | 0.446309 | false |
mayuanucas/notes | python/code/linearunit.py | 1 | 1443 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from perceptron import Perceptron
class LinearUnit(Perceptron):
def __init__(self, input_num, activator):
'''
初始化感知器,设置输入参数的个数,以及激活函数。
'''
Perceptron.__init__(self, input_num, activator)
def func(x):
'''
Define the activation function func.
'''
return x
def get_training_dataset():
'''
Build the training dataset.
'''
# Input vector list; each entry is the years of work experience
all_input_vecs = [[5], [3], [8], [1.4], [10.1]]
# Expected outputs; each entry matches an input and represents the monthly salary
labels = [5500, 2300, 7600, 1800, 11400]
return all_input_vecs, labels
def train_linear_unit():
'''
Train the linear unit with the dataset.
'''
# Create the linear unit: one input parameter, activation function func
lu = LinearUnit(1, func)
# Train for 10 iterations with a learning rate of 0.01
all_input_vecs, labels = get_training_dataset()
lu.train(all_input_vecs, labels, 10, 0.01)
# Return the trained linear unit
return lu
if __name__ == '__main__':
# Train the linear unit
linear_unit = train_linear_unit()
# Print the learned weights
print(linear_unit)
# Run predictions on a few test inputs
print('Work 3.4 years, monthly salary = %.2f' % linear_unit.predict([3.4]))
print('Work 15 years, monthly salary = %.2f' % linear_unit.predict([15]))
print('Work 1.5 years, monthly salary = %.2f' % linear_unit.predict([1.5]))
print('Work 6.3 years, monthly salary = %.2f' % linear_unit.predict([6.3])) | apache-2.0 | -2,600,775,387,799,649,300 | 22.28 | 76 | 0.647463 | false |
collective/ECSpooler | backends/junit/JUnit.py | 1 | 18661 | # -*- coding: utf-8 -*-
# $Id$
#
# Copyright (c) 2007-2011 Otto-von-Guericke-Universität Magdeburg
#
# This file is part of ECSpooler.
#
################################################################################
# Changelog #
################################################################################
#
# 04.03.2009, chbauman:
# replaced '/' by join() for better platform independency
# formatted source code
# 16.03.2009, chbauman:
# worked on comments
# changed version
# new function 'getLibInImportPos'
# outhoused RE for importsArray to JUnitConf.py
# improved 'handleStudentsImports'
# improved error message
# removed useless comments
# 17.03.2009, chbauman:
# import junit_libs.*, if no other import is declared
# 30.03.2009, chbauman:
# insertion of imports in handleStudentsImports causes increase of line_offset
# 06.04.2009, chbauman:
# implemented _postProcessCheckSemantics
# 07.04.2009, chbauman:
# added some comments
# all post processors delete string JUnitConf.NS_STUDENT from messages now.
# 30.04.2009, chbauman:
# replaced re.sub whenever possible
# 12.07.2009, amelung:
# renamed JUnitConf to config; moved some settings from config to this file
# 16.05.2010, chbauman:
# New Regular Expression for finding closed multiline comments.
# grantValidPackage handles commented package declarations more gracefully, now.
# Some minor formattings.
import sys, os, re
import logging
from os.path import join
from lib.data.BackendResult import BackendResult
from lib.ProgrammingBackend import ProgrammingBackend, EX_OK
# import config file
from backends.junit import config
LOG = logging.getLogger()
## Regular expressions to extract certain information
# CLASS_NAME_RE consists of ClassModifier? class Identifier Super? Interfaces? ClassBody
# (see http://java.sun.com/docs/books/jls/first_edition/html/8.doc.html#15372 [04.03.2009@09:50])
# We are only interested in the public class
javaClassModifier = 'public'
# Class names start with an uppercase letter followed by word characters
javaClassName = '[A-Z]\w*'
# Generics are strings surrounded by '<' and '>'.
javaGeneric = '\<\w*\>'
# An Identifier is a name followed by an optional generic argument
javaIdentifier = '%s(%s)?' % (javaClassName, javaGeneric)
# 'extends' followed by an identifier signals that this class inherits from another class
javaSuper = 'extends\s+%s' % javaIdentifier
# 'implements' followed by a comma-separated list of identifiers signals which interfaces a class has
javaInterfaces = 'implements\s+(%s)?(\s*,\s*%s)*' % (javaIdentifier, javaIdentifier)
# '{' is sufficient for the (our) body
javaClassBody = '\{'
# Since the class of the name we want to extract definately is public, <ClassModifier> is NOT optional
javaClassDeclaration = '%s\s+class\s+(?P<className>%s)(%s)?\s*(%s)?\s*(%s)?\s*%s' % (javaClassModifier, javaClassName, javaGeneric, javaSuper, javaInterfaces, javaClassBody)
CLASS_NAME_RE = re.compile(javaClassDeclaration)
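# Illustrative behaviour of the pattern above (comment only, not original code):
# CLASS_NAME_RE.search('public class Foo<T> extends Bar implements Baz {')
# yields a match whose group('className') is 'Foo'.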
# Determines the student's chosen package
PACKAGE_NAME_RE = re.compile('^\s*package\s+(?P<packageName>[a-z]+\w*);')
# Finds closed multiline comments (flags indicate multiline and dotall matchings)
CLOSED_MULTILINE_COMMENTS_RE = re.compile('/\*.+?\*/', re.M | re.S)
# Finds all import declarations excluding packages java.*
IMPORT_NAME_NOT_JAVA_RE = re.compile('import\s+(?!java\.)(?P<name>.*);')
# This RE will search for the first two lines of a Failure-Object trace.
# java.lang.ArrayIndexOutOfBoundsException: 2 <- will be matched
# at studentPackage.Matrix.mult(Matrix.java:20) <- will be matched
# at JUnitTester.multAdd(JUnitTester.java:29) <- will NOT be matched
#FAILURE_TRACE_RE = re.compile('(\w|\.)+?:\s\d+(\s\t)*?at\s%s\.\w+?\.\w+?\(\w+?\.\w+?:(?P<number>\d+?)\)' % NS_STUDENT)
FAILURE_TRACE_RE = re.compile('.*?%s.*?(?P<number>\d+).*?$' % config.NS_STUDENT, re.M | re.S)
class JUnit(ProgrammingBackend):
"""
Backend class that determines whether a submission of Java code
returns the expected values defined in JUnit tests.
"""
id = 'junit'
name = 'JUnit'
version = '1.2'
schema = config.inputSchema
testSchema = config.tests
srcFileSuffix = '.java'
# While preprocessing student's submission it may occur that some lines
# have to be added (like package declarations). In case of failures during
# checks the feedbacks have to be scanned for line numbers and to be
# updated (minus line_offset).
line_offset = 0
#-------- Constructor --------------------------------------------------------
def __init__(self, params, versionFile=__file__):
"""
This constructor is needed to set the logging environment.
"""
ProgrammingBackend.__init__(self, params, versionFile)
#-------- Methods for modifying incomming source code ------------------------
def getClassName(self, source):
"""
Returns the class name of a given java source.
@param source: Java source code.
@return: Class name of given source code.
"""
matcher = CLASS_NAME_RE.search(source)
assert matcher is not None, \
'Name of the public class could not be extracted from source\n\n%s' % source
return matcher.group('className')
def replaceVariableCLASS(self, source, className):
"""
Replaces all Variables ${CLASS} with the class name of a given java source.
@param source: Java source code.
@param className: Class name that ${CLASS} will be substituted with.
@return: source with substituted ${CLASS}.
"""
return source.replace('${CLASS}', className)
def ensureValidPackage(self, source):
"""
Determines whether source already has a package declaration.
If yes, it will be overwritten with a new declaration.
If not, a new package declaration will be written.
Note that this method ignores invalid package declarations inside comments by
removing closed multiline comments from the search string and by requiring
that a package declaration is not inside a single-line comment.
@param source: Java source code.
@return: source with valid package declaration.
"""
# Temporarily remove all multiline closed comments:
noMultilineCommentSource = re.sub(CLOSED_MULTILINE_COMMENTS_RE, '', source)
# Try to find a proper package declaration
matcher = PACKAGE_NAME_RE.search(noMultilineCommentSource)
if matcher is not None:
# We found a package declaration -> replace it!
return re.sub('package\s+.*;',
'package %s;' % config.NS_STUDENT,
source)
else:
tmp_result = 'package %s;\n\n' % config.NS_STUDENT
# we've inserted two lines of source code:
self.line_offset += 2
return tmp_result + source
def getLibInImportPos(self, libName, importDecl):
"""
Searches in importDecl for libName and returns the right-most position.
Since we are working on Java import declarations, we have to search for libName preceded by
space or a dot and followed by a dot or nothing.
@param libName: Name of library that shall be searched
@param importDecl: A Java import declaration libName will be searched in.
@return: right-most position of libName in importDecl or -1 if not found.
"""
pos = -1
libInImports = re.search('(?<=(\s|\.))' + libName + '(\.|$)', importDecl)
# if libInImports is None, libName is not in importDecl:
if libInImports is not None:
match = libInImports.group()
# find right-most position:
pos = importDecl.rfind(match)
return pos
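# Illustrative calls (comment only, not original code):
# getLibInImportPos('junit_libs', 'import junit_libs.Helper;') -> 7
# getLibInImportPos('junit_libs', 'import java.util.List;') -> -1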
def handleStudentsImports(self, source):
"""
Student's imports should be covered by the packages that are set
in the Java classpath command line option. Single Java classes
in junit_libs will be included by this method.
@param source: Java source code.
@return: source with special import declarations for junit_libs
"""
# since a valid package is already written, we can access it:
packageDeclaration = 'package %s;' % config.NS_STUDENT
replacement = '%s\n\nimport %s.*;' % (packageDeclaration,
config.JUNIT_LIBS)
source = source.replace(packageDeclaration, replacement, 1)
# by adding the new import declaration, line_offset
# increases by 2:
self.line_offset += 2
return source
#-------- Syntax methods that have to be overwritten -------------------------
def _preProcessCheckSyntax(self, test, src, **kwargs):
# at the very beginning of our syntax check we set line_offset
# to 0. Setting it to 0 in _postProcessCheckSyntax would lead to an
# accumulation of offsets if a submission is syntactically incorrect.
self.line_offset = 0
srcWithValidPackages = self.ensureValidPackage(src)
#logging.debug(srcWithValidPackages)
preProcessedSource = self.handleStudentsImports(srcWithValidPackages)
#logging.debug(preProcessedSource)
className = self.getClassName(src)
return preProcessedSource, className
def _postProcessCheckSyntax(self, test, message):
"""
This method subtracts line_offset from the line numbers the compiler
returned in its message.
After that, every occurence of config.NS_STUDENT+'.' will be erased.
@see: ProgrammingBackend._postProcessCheckSyntax
"""
matches = re.findall('\w+\.\w+:(?P<numbers>\d+):', message)
for match in matches:
new_line_number = int(match) - self.line_offset
message = message.replace(match, str(new_line_number), 1)
message = message.replace(config.NS_STUDENT + '.', '')
return message
def _process_checkSyntax(self, jobId, testSpec, submission):
"""
Tests the syntax of a program.
@param jobId: ID for this test job
@param testSpec: the selected test environment (cf. self.testSchema)
@param submission: the submission whose syntax is checked
@return: a BackendResult or None if test succeeded
"""
# get the compiler or if not available the interpreter
compiler = testSpec.compiler or testSpec.interpreter
if compiler:
try:
# test term (e.g., student's source code)
try:
src, mName = self._preProcessCheckSyntax(testSpec, submission)
except AssertionError, ae:
return BackendResult(False, str(ae))
LOG.info('Running syntax check with test: %s' % testSpec.getName())
# guarantee that the submission will be put in folder NS_STUDENT
folder = join(jobId, config.NS_STUDENT)
#logging.debug('xxx: %s' % folder)
module = self._writeModule(
mName,
src,
self.srcFileSuffix,
folder,
testSpec.encoding)
#logging.debug('xxx: %s' % module)
exitcode, result = \
self._runInterpreter(
compiler,
os.path.dirname(module['file']),
os.path.basename(module['file']),
config.CLASSPATH_SETTINGS)
#logging.debug('exitcode: %s' % repr(exitcode))
#logging.debug('result: %s' % repr(result))
except Exception, e:
msg = 'Internal error during syntax check: %s: %s' % \
(sys.exc_info()[0], e)
LOG.error(msg)
return BackendResult(-220, msg)
LOG.debug('exitcode: %s' % repr(-exitcode))
# consider exit code
if exitcode != EX_OK:
result = self._postProcessCheckSyntax(testSpec, result)
#return BackendResult(-exitcode, result or repr(-exitcode))
return BackendResult(False, result or repr(-exitcode))
else:
msg = 'No compiler/interpreter defined (test spec: %s).' \
% testSpec.getName()
LOG.error(msg)
return BackendResult(-221, msg)
# everything seems to be ok
return None
#-------- Semantic methods that have to be overwritten -----------------------
def _process_checkSemantics(self, job):
"""
Checks the semantics of a program.
@param job: the job whose submission is checked
@return: a BackendResult.
"""
inputFields = self.schema.filterFields(type = 'InputField')
# variable declaration
exitcode = -42
# Test if an InputField exists
assert inputFields, 'No InputFields found!'
# get submission
submission = job['submission']
assert submission is not None, \
'Semantic check requires a valid submission:\n\n%s' % repr(submission)
tests = self._getTests(job)
if len(tests) == 0:
message = 'No test specification selected.'
LOG.warn('%s, %s' % (message, job.getId()))
return BackendResult(-217, message)
test = tests[0]
try:
submissionClassName = self.getClassName(submission)
except AssertionError, ae:
message = str(ae)
LOG.warn('%s, %s' % (message, job.getId()))
return BackendResult(-230, message)
# get compiler
compiler = test.compiler
# get interpreter
interpreter = test.interpreter
# get template
wrapper_code = test.semantic
#----------- compile and run Wrapper Template ------------------------
# replace all variables in wrapper template
for field in self.schema.filterFields(type = 'InputField'):
field_text = job[field.getName()]
# empty fields should cause that no text is written
if field_text is None:
field_text = ""
#wrapper_code = re.sub('\$\{%s\}' % field.getName(),
# field_text,
# wrapper_code)
wrapper_code = wrapper_code.replace('${%s}' % field.getName(), field_text)
wrapper_code = self.replaceVariableCLASS(wrapper_code, submissionClassName)
try:
wrapperModule = self._writeModule(
config.CLASS_SEMANTIC_CHECK,
wrapper_code,
suffix = self.srcFileSuffix,
dir = job.getId(),
encoding = test.encoding)
# compile using javac
exitcode, result = self._runInterpreter(
compiler,
os.path.dirname(wrapperModule['file']),
os.path.basename(wrapperModule['file']),
config.CLASSPATH_SETTINGS)
#assert exitcode == EX_OK, \
# 'Error in wrapper code during semantic check:\n\n%s' % result
if exitcode == EX_OK:
# run using java
exitcode, result = self._runInterpreter(
interpreter,
os.path.dirname(wrapperModule['file']),
config.CLASS_SEMANTIC_CHECK,
config.CLASSPATH_SETTINGS)
except Exception, e:
message = 'Internal error during semantic check: %s: %s' % \
(sys.exc_info()[0], e)
LOG.error(message)
msg = re.sub(config.METHOD_NOT_FOUND_RE, "", message)
return BackendResult(-230, msg)
if exitcode != EX_OK:
# postprocess the result:
result = self._postProcessCheckSemantic(test, result)
result = config.FAILED_TESTS_MESSAGE + '\n\n' + result
return BackendResult(False, result)
else:
#return BackendResult(True, '\nYour submission passed all tests.')
#return BackendResult(True, result)
return BackendResult(True, config.PASSED_ALL_TESTS_MESSAGE)
def _postProcessCheckSemantic(self, test, message):
"""
This method is used to post process interpreter messages.
Two modifications will be performed:
First, the message will be scanned for a Failure trace. If one exists,
the trace is shortened and line_offset subtracted from the returned line
numbers.
Second, every occurence of config.NS_STUDENT+'.' will be erased.
@see: ProgrammingBackend._postProcessCheckSemantic
"""
# scan for Failure trace:
matcher = FAILURE_TRACE_RE.search(message)
# if the matcher is not None, there exists an Failure trace:
if matcher is not None:
match = matcher.group()
# don't forget to subtract line_offset from match's line number
number = matcher.group('number')
message = match.replace(number, str(int(number) - self.line_offset))
# we do not display the whole trace. Show that there was more:
message = message + '\n\t...'
# erase all occurences of config.NS_STUDENT
message = message.replace(config.NS_STUDENT + '.', '')
return message
| gpl-2.0 | -2,208,946,846,713,217,800 | 37.956159 | 173 | 0.569614 | false |
xuxiao19910803/edx-platform | lms/djangoapps/oauth_tianyuyun/views.py | 1 | 1363 | # -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from .utils import TianYuClient
from .settings import TIANYUYUN_LOGIN_URL
LOGIN_SUCCESS_REDIRECT_URL = '/dashboard'
LOGIN_CREATE_SUCCESS_REDIRECT_URL = '/dashboard' # '/account/settings'
LOGIN_ERROR_REDIRECT_URL = TIANYUYUN_LOGIN_URL.split('?')[0]
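# SSO callback view: exchange the TianYu 'ticket' for a session id, fetch the
# user info, get or create the linked local account, authenticate and log it
# in, then redirect to the dashboard.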
def login_tianyuyun(request):
ticket = request.GET.get('ticket', '')
if ticket:
client = TianYuClient()
usesessionid = client.get_usesessionid_by_ticket(ticket)
if usesessionid:
userinfo = client.get_userinfo_by_sessionid(usesessionid)
if userinfo.get('idcardno', ''):
user = request.user if request.user.is_authenticated() else None
oauth_obj, create = client.get_or_create_oauth_by_userinfo(userinfo, user)
if oauth_obj and oauth_obj.user:
user = authenticate(oauth_obj=oauth_obj, username='')
login(request, user)
if create:
return HttpResponseRedirect(LOGIN_CREATE_SUCCESS_REDIRECT_URL)
else:
return HttpResponseRedirect(LOGIN_SUCCESS_REDIRECT_URL)
return HttpResponseRedirect(LOGIN_SUCCESS_REDIRECT_URL)
| agpl-3.0 | 3,580,869,867,609,412,000 | 40.59375 | 90 | 0.624358 | false |
davy39/eric | Plugins/VcsPlugins/vcsSubversion/Ui_SvnUrlSelectionDialog.py | 1 | 5231 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Plugins/VcsPlugins/vcsSubversion/SvnUrlSelectionDialog.ui'
#
# Created: Tue Nov 18 17:53:57 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SvnUrlSelectionDialog(object):
def setupUi(self, SvnUrlSelectionDialog):
SvnUrlSelectionDialog.setObjectName("SvnUrlSelectionDialog")
SvnUrlSelectionDialog.resize(542, 195)
SvnUrlSelectionDialog.setSizeGripEnabled(True)
self.vboxlayout = QtWidgets.QVBoxLayout(SvnUrlSelectionDialog)
self.vboxlayout.setObjectName("vboxlayout")
self.urlGroup1 = QtWidgets.QGroupBox(SvnUrlSelectionDialog)
self.urlGroup1.setObjectName("urlGroup1")
self.hboxlayout = QtWidgets.QHBoxLayout(self.urlGroup1)
self.hboxlayout.setObjectName("hboxlayout")
self.repoRootLabel1 = QtWidgets.QLabel(self.urlGroup1)
self.repoRootLabel1.setObjectName("repoRootLabel1")
self.hboxlayout.addWidget(self.repoRootLabel1)
self.typeCombo1 = QtWidgets.QComboBox(self.urlGroup1)
self.typeCombo1.setObjectName("typeCombo1")
self.hboxlayout.addWidget(self.typeCombo1)
self.labelCombo1 = QtWidgets.QComboBox(self.urlGroup1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelCombo1.sizePolicy().hasHeightForWidth())
self.labelCombo1.setSizePolicy(sizePolicy)
self.labelCombo1.setEditable(True)
self.labelCombo1.setObjectName("labelCombo1")
self.hboxlayout.addWidget(self.labelCombo1)
self.vboxlayout.addWidget(self.urlGroup1)
self.urlGroup2 = QtWidgets.QGroupBox(SvnUrlSelectionDialog)
self.urlGroup2.setObjectName("urlGroup2")
self.hboxlayout1 = QtWidgets.QHBoxLayout(self.urlGroup2)
self.hboxlayout1.setObjectName("hboxlayout1")
self.repoRootLabel2 = QtWidgets.QLabel(self.urlGroup2)
self.repoRootLabel2.setObjectName("repoRootLabel2")
self.hboxlayout1.addWidget(self.repoRootLabel2)
self.typeCombo2 = QtWidgets.QComboBox(self.urlGroup2)
self.typeCombo2.setObjectName("typeCombo2")
self.hboxlayout1.addWidget(self.typeCombo2)
self.labelCombo2 = QtWidgets.QComboBox(self.urlGroup2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelCombo2.sizePolicy().hasHeightForWidth())
self.labelCombo2.setSizePolicy(sizePolicy)
self.labelCombo2.setEditable(True)
self.labelCombo2.setObjectName("labelCombo2")
self.hboxlayout1.addWidget(self.labelCombo2)
self.vboxlayout.addWidget(self.urlGroup2)
self.summaryCheckBox = QtWidgets.QCheckBox(SvnUrlSelectionDialog)
self.summaryCheckBox.setObjectName("summaryCheckBox")
self.vboxlayout.addWidget(self.summaryCheckBox)
self.buttonBox = QtWidgets.QDialogButtonBox(SvnUrlSelectionDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.vboxlayout.addWidget(self.buttonBox)
self.retranslateUi(SvnUrlSelectionDialog)
self.buttonBox.accepted.connect(SvnUrlSelectionDialog.accept)
self.buttonBox.rejected.connect(SvnUrlSelectionDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SvnUrlSelectionDialog)
SvnUrlSelectionDialog.setTabOrder(self.typeCombo1, self.labelCombo1)
SvnUrlSelectionDialog.setTabOrder(self.labelCombo1, self.typeCombo2)
SvnUrlSelectionDialog.setTabOrder(self.typeCombo2, self.labelCombo2)
SvnUrlSelectionDialog.setTabOrder(self.labelCombo2, self.summaryCheckBox)
SvnUrlSelectionDialog.setTabOrder(self.summaryCheckBox, self.buttonBox)
def retranslateUi(self, SvnUrlSelectionDialog):
_translate = QtCore.QCoreApplication.translate
SvnUrlSelectionDialog.setWindowTitle(_translate("SvnUrlSelectionDialog", "Subversion Diff"))
self.urlGroup1.setTitle(_translate("SvnUrlSelectionDialog", "Repository URL 1"))
self.typeCombo1.setToolTip(_translate("SvnUrlSelectionDialog", "Select the URL type"))
self.labelCombo1.setToolTip(_translate("SvnUrlSelectionDialog", "Enter the label name or path"))
self.urlGroup2.setTitle(_translate("SvnUrlSelectionDialog", "Repository URL 2"))
self.typeCombo2.setToolTip(_translate("SvnUrlSelectionDialog", "Select the URL type"))
self.labelCombo2.setToolTip(_translate("SvnUrlSelectionDialog", "Enter the label name or path"))
self.summaryCheckBox.setToolTip(_translate("SvnUrlSelectionDialog", "Select to just show a summary of differences"))
self.summaryCheckBox.setText(_translate("SvnUrlSelectionDialog", "Summary only"))
| gpl-3.0 | -6,944,341,487,784,356,000 | 57.775281 | 124 | 0.751673 | false |
raffaellod/abamake | src/comk/argparser.py | 1 | 7433 | # -*- coding: utf-8; mode: python; tab-width: 3; indent-tabs-mode: nil -*-
#
# Copyright 2013-2017 Raffaello D. Di Napoli
#
# This file is part of Complemake.
#
# Complemake is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Complemake is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with Complemake. If not, see
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------------------------------------
"""Complemake command line argument parsing."""
import argparse
import os
import comk
##############################################################################################################
class Command(object):
_instances = {}
def __init__(self, name):
self._name = name
self._instances[name] = self
def __repr__(self):
return self._name
@classmethod
def from_str(cls, name):
return cls._instances.get(name, name)
Command.BUILD = Command('build')
Command.CLEAN = Command('clean')
Command.EXEC = Command('exec')
Command.QUERY = Command('query')
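# Illustrative behaviour of the registry above (comment only, not original code):
# Command.from_str('build') is Command.BUILD -> True
# Command.from_str('bogus') -> 'bogus' (unknown names stay plain strings)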
##############################################################################################################
class Parser(object):
"""Parses Complemake’s command line."""
_parser = None
def __init__(self):
"""Constructor."""
self._parser = argparse.ArgumentParser(add_help=False)
# Flags that apply to all commands.
self._parser.add_argument(
'--help', action='help',
help='Show this informative message and exit.'
)
self._parser.add_argument(
'-n', '--dry-run', action='store_true',
help='Don’t actually run any external commands. Useful to test if anything needs to be built.'
)
self._parser.add_argument(
'-o', '--output-dir', metavar='/path/to/output/dir', default='',
help='Location where all Complemake output for the project should be stored. Defaults to the ' +
'project’s directory.'
)
self._parser.add_argument(
'-p', '--project', metavar='PROJECT.comk',
help='Complemake project (.comk) containing instructions on how to build targets. If omitted and ' +
'the current directory contains a single file matching *.comk, that file will be used as the ' +
'project.'
)
if comk.os_is_windows():
default_shared_dir = 'Complemake'
user_apps_home_description = 'common repository for application-specific data (typically ' + \
'“Application Data”)'
else:
default_shared_dir = '.comk'
user_apps_home_description = 'user’s $HOME directory'
self._parser.add_argument(
'--shared-dir', metavar='path/to/shared/dir', type=self.get_abs_shared_dir,
default=default_shared_dir,
help=('Directory where Complemake will store data shared across all projects, such as projects’ ' +
'dependencies. Defaults to “{}” in the {}.').format(
default_shared_dir, user_apps_home_description
)
)
self._parser.add_argument(
'-s', '--system-type', metavar='SYSTEM-TYPE',
help='Use SYSTEM-TYPE as the system type for which to build; examples: x86_64-pc-linux-gnu, ' +
'i686-pc-win32. If omitted, detect a default for the machine on which Complemake is being run.'
)
self._parser.add_argument(
'--tool-c++', metavar='/path/to/c++', dest='tool_cxx',
help='Use /path/to/c++ as the C++ compiler (and linker driver, unless --tool-ld is also specified).'
)
self._parser.add_argument(
'--tool-ld', metavar='/path/to/ld',
help='Use /path/to/ld as the linker/linker driver.'
)
self._parser.add_argument(
'-v', '--verbose', action='count', default=0,
help='Increase verbosity level; can be specified multiple times.'
)
subparsers = self._parser.add_subparsers(dest='command')
subparsers.type = Command.from_str
subparsers.required = True
build_subparser = subparsers.add_parser(Command.BUILD)
build_subparser.add_argument(
'--force', action='store_true', dest='force_build',
help='Unconditionally rebuild all targets.'
)
build_subparser.add_argument(
'--force-test', action='store_true',
help='Unconditionally run all test targets.'
)
build_subparser.add_argument(
'-j', '--jobs', default=None, metavar='N', type=int,
help='Build using N processes at at time; if N is omitted, build all independent targets at the ' +
'same time. If not specified, the default is --jobs <number of processors>.'
)
build_subparser.add_argument(
'-k', '--keep-going', action='store_true',
help='Continue building targets even if other independent targets fail.'
)
build_subparser.add_argument(
'-f', '--target-file', metavar='/generated/file', action='append', dest='target_files', default=[],
help='Specify once or more to indicate which target files should be built. ' +
'If no -f or -t arguments are provided, all targets declared in the Complemake project ' +
'(.comk) will be built.'
)
build_subparser.add_argument(
'-t', '--target-name', action='append', dest='target_names', default=[],
help='Specify once or more to indicate which named targets should be built. ' +
'If no -f or -t arguments are provided, all targets declared in the Complemake project ' +
'(.comk) will be built.'
)
build_subparser.add_argument(
'-u', '--update-deps', action='store_true',
help='Update all dependencies (e.g. pull git repo) before building.'
)
clean_subparser = subparsers.add_parser(Command.CLEAN)
exec_subparser = subparsers.add_parser(Command.EXEC)
exec_subparser.add_argument(
'exec_exe', metavar='EXECUTABLE',
help='Command to execute.'
)
exec_subparser.add_argument(
'exec_args', metavar='...', nargs=argparse.REMAINDER,
help='Arguments to pass EXECUTABLE.'
)
query_subparser = subparsers.add_parser(Command.QUERY)
query_group = query_subparser.add_mutually_exclusive_group(required=True)
query_group.add_argument(
'--exec-env', dest='query_exec_env', action='store_true',
help='Print any environment variable assignments needed to execute binaries build by the project.'
)
@staticmethod
def get_abs_shared_dir(shared_dir):
if os.path.isabs(shared_dir):
return shared_dir
else:
return os.path.normpath(os.path.join(comk.get_user_apps_home(), shared_dir))
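# Minimal usage sketch (illustrative comment; target name is a placeholder):
# parser = Parser()
# args = parser.parse_args(['build', '-t', 'mytarget'])
# args.command is Command.BUILD -> True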
def parse_args(self, *args, **kwargs):
"""See argparse.ArgumentParser.parse_args()."""
return self._parser.parse_args(*args, **kwargs)
| gpl-3.0 | 7,662,710,956,515,190,000 | 39.966851 | 110 | 0.598247 | false |
Ginkgo-Biloba/Misc-Python | numpy/SciPyInt.py | 1 | 3425 | # coding=utf-8
import numpy as np
from scipy import integrate as intgrt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import sqrt
# Compute the volume of the unit half ball
def ballVolume():
def halfBall(x, y):
return sqrt(max(0.0, 1 - x**2 - y**2)) # guard against tiny negative values at the boundary
def halfCircle(x):
return sqrt(1 - x**2)
(vol, error) = intgrt.dblquad(halfBall, -1, 1, lambda x: -halfCircle(x), lambda x: halfCircle(x))
print ("vol =", vol)
# Integrate a system of ordinary differential equations:
# compute trajectories of the Lorenz attractor
def LorenzAttractor():
# Given the position vector w and the parameters sigma, rho and beta, compute the velocity vector (dx, dy, dz)
def lorenz(w, t, sigma, rho, beta):
(x, y, z) = w.tolist()
return (sigma * (y - x), x * (rho - z), x * y - beta * z)
t = np.arange(0, 20, 0.01) # create the time points
# Solve the lorenz system with odeint, using two slightly different initial values
track1 = intgrt.odeint(lorenz, (0.0, 1.0, 0.0), t, args=(10.0, 28.0, 2.7))
track2 = intgrt.odeint(lorenz, (0.0, 1.01, 0.0), t, args=(10.0, 28.0, 2.7))
# Plot the two trajectories
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(track1[:, 0], track1[:, 1], track1[:, 2], label="$y=1.0$")
ax.plot(track2[:, 0], track2[:, 1], track2[:, 2], label="$y=1.01$")
plt.legend(loc="best")
plt.show()
# Mass-spring-damper system
# Mx'' + bx' + kx = F
def msd(xu, t, M, k, b, F):
(x, u) = xu.tolist()
dx = u
du = (F - k * x - b * u) / M
return (dx, du)
def msdDemo():
# The slider starts at displacement x = -1.0 with zero velocity; the external control force is a constant 1.0
initxu = (-1.0, 0.0)
(M, k, b, F) = (1.0, 0.5, 0.2, 1.0)
t = np.arange(0, 40, 0.02)
rst = intgrt.odeint(msd, initxu, t, args=(M, k, b, F))
(fig, (ax1, ax2)) = plt.subplots(2, 1)
ax1.plot(t, rst[:, 0], label=u"displacement x")
ax2.plot(t, rst[:, 1], label=u"velocity u")
ax1.legend(); ax2.legend()
plt.show()
# Mass-spring-damper system
class MassSpringDamper(object):
def __init__(self, M, k, b, F):
(self.M, self.k, self.b, self.F) = (M, k, b, F)
# Derivative function
def dee(self, t, xu):
(x, u) = xu.tolist()
dx = u
du = (self.F - self.k * x - self.b * u) / self.M
return [dx, du] # the integrator expects a list here, not a tuple
# Use a PID controller
class PID(object):
def __init__(self, kp, ki, kd, dt):
(self.kp, self.ki, self.kd, self.dt) = (kp, ki, kd, dt)
self.lastErr = None
self.x = 0.0
def update(self, err):
p = self.kp * err
i = self.ki * self.x
if self.lastErr is None:
d = 0.0
else:
d = self.kd * (err - self.lastErr) / self.dt
self.x += err * self.dt
self.lastErr = err
return p + i + d
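# The discrete control law implemented above (comment only):
# output = kp*err + ki*integral(err dt) + kd*d(err)/dt
# with the integral accumulated in self.x and the derivative estimated from
# consecutive errors spaced dt apart.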
# Control the external force F so that the slider settles at displacement 2.0 more quickly
def msdPID(kp, ki, kd, dt):
stm = MassSpringDamper(M=1.0, k=0.5, b=0.2, F=1.0)
initxu = (-1.0, 0.0)
pid = PID(kp, ki, kd, dt)
r = intgrt.ode(stm.dee)
r.set_integrator("vode", method="bdf")
r.set_initial_value(initxu, 0)
t = list(); rst = list(); FArr = list()
while (r.successful() and (r.t + dt < 3)):
r.integrate(r.t + dt)
t.append(r.t)
rst.append(r.y)
err = 2.0 - r.y[0]
F = pid.update(err)
stm.F = F
FArr.append(F)
rst = np.array(rst)
t = np.array(t)
FArr = np.array(FArr)
(fig, (ax1, ax2, ax3)) = plt.subplots(3, 1)
ax1.plot(t, rst[:, 0], label=u"displacement x")
ax2.plot(t, rst[:, 1], label=u"velocity u")
ax3.plot(t, FArr, label=u"control force F")
ax1.legend(); ax2.legend(); ax3.legend()
plt.show()
if (__name__ == "__main__"):
# ballVolume()
LorenzAttractor()
# msdDemo()
# msdPID(19.29, 1.41, 6.25, 0.02) # best parameter set found
| gpl-3.0 | -104,215,309,055,111,760 | 25.347458 | 98 | 0.577999 | false |
ubc/compair | compair/tests/algorithms/test_validity.py | 1 | 8324 | import json
import random
import math
import unittest
import os
import unicodecsv as csv
from enum import Enum
from data.fixtures.test_data import ComparisonTestData
from compair.models import Answer, Comparison, \
WinningAnswer, AnswerScore, AnswerCriterionScore, \
PairingAlgorithm, ScoringAlgorithm
from compair.tests.test_compair import ComPAIRAPITestCase
from compair.core import db
from compair.tests import test_app_settings
from compair import create_app
SKIP_VALIDITY_TEST = False
try:
from scipy.stats import spearmanr, pearsonr, kendalltau
import numpy
except:
SKIP_VALIDITY_TEST = True
class WinnerSelector(Enum):
always_correct = "always_correct"
correct_with_error = "correct_with_error"
guessing = "guessing"
closely_matched_errors = "closely_matched_errors"
class AlgorithmValidityTests(ComPAIRAPITestCase):
# def create_app(self):
# settings = test_app_settings.copy()
# settings['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+self._sqlite_db()
# app = create_app(settings_override=settings)
# return app
def setUp(self):
if SKIP_VALIDITY_TEST:
self.skipTest("scipy and numpy not installed. run `make deps`")
# remove existing sqlite db if exists
# self._delete_sqlite_db()
# TODO: Modify conditions to be more fuzzy (closely_matched_errors with 0.05 correct rate)
# Depends on results of research
super(AlgorithmValidityTests, self).setUp()
self.data = ComparisonTestData()
self.ACCEPTABLE_CORRELATION = 0.8
self.NUMBER_OF_ANSWERS = 40
self.WINNER_SELECTOR = WinnerSelector.always_correct
self.CORRECT_RATE = 1.0
self.MAX_COMPARSIONS = (self.NUMBER_OF_ANSWERS - 1) * (self.NUMBER_OF_ANSWERS - 2) / 2
self.TOTAL_MAX_ROUNDS = 6 # 3 comparisons per student
self.COMPARISONS_IN_ROUND = math.ceil(self.NUMBER_OF_ANSWERS / 2)
# stop after the lower of: all comparisons possible, or TOTAL_MAX_ROUNDS rounds' worth of comparisons
self.TOTAL_MAX_COMPARISONS = min(
self.COMPARISONS_IN_ROUND * self.TOTAL_MAX_ROUNDS,
self.NUMBER_OF_ANSWERS * self.MAX_COMPARSIONS
)
self.course = self.data.create_course()
self.instructor = self.data.create_instructor()
self.data.enrol_instructor(self.instructor, self.course)
self.assignment = self.data.create_assignment_in_comparison_period(
self.course, self.instructor,
number_of_comparisons=self.MAX_COMPARSIONS,
scoring_algorithm=ScoringAlgorithm.elo,
pairing_algorithm=PairingAlgorithm.adaptive_min_delta
)
self.students = []
self.answers = []
self.grade_by_answer_uuid = {}
actual_grades = numpy.random.normal(0.78, 0.1, self.NUMBER_OF_ANSWERS)
for grade in actual_grades:
student = self.data.create_normal_user()
self.data.enrol_student(student, self.course)
self.students.append(student)
answer = self.data.create_answer(self.assignment, student, with_score=False)
self.answers.append(answer)
self.grade_by_answer_uuid[answer.uuid] = grade
self.base_url = self._build_url(self.course.uuid, self.assignment.uuid)
db.session.commit()
# def tearDown(self):
# self._delete_sqlite_db()
# def _sqlite_db(self):
# return 'test_comparison'+str(os.getpid())+'.db'
# def _delete_sqlite_db(self):
# file_path = os.path.join(os.getcwd(), 'compair', self._sqlite_db())
# if os.path.isfile(file_path):
# try:
# os.remove(file_path)
# except Exception as e:
# print(e)
def _decide_winner(self, answer1_uuid, answer2_uuid):
answer1_grade = self.grade_by_answer_uuid[answer1_uuid]
answer2_grade = self.grade_by_answer_uuid[answer2_uuid]
if self.WINNER_SELECTOR == WinnerSelector.always_correct:
return self.always_correct(answer1_grade, answer2_grade)
elif self.WINNER_SELECTOR == WinnerSelector.guessing:
return self.guessing()
elif self.WINNER_SELECTOR == WinnerSelector.correct_with_error:
return self.correct_with_error(answer1_grade, answer2_grade, self.CORRECT_RATE)
elif self.WINNER_SELECTOR == WinnerSelector.closely_matched_errors:
return self.closely_matched_errors(answer1_grade, answer2_grade, self.CORRECT_RATE)
else:
raise Exception()
def always_correct(self, value1, value2):
return self.correct_with_error(value1, value2, 1.0)
def correct_with_error(self, value1, value2, correct_rate):
if value1 == value2:
return self.guessing()
correct_answer = WinningAnswer.answer1 if value1 > value2 else WinningAnswer.answer2
incorrect_answer = WinningAnswer.answer1 if value1 < value2 else WinningAnswer.answer2
return correct_answer if random.random() <= correct_rate else incorrect_answer
def guessing(self):
return WinningAnswer.answer1 if random.random() <= 0.5 else WinningAnswer.answer2
def closely_matched_errors(self, value1, value2, sigma):
# make the actual values of answers fuzzy (represents perceived value errors)
fuzzy_value1 = numpy.random.normal(value1, sigma, 1)[0]
fuzzy_value2 = numpy.random.normal(value2, sigma, 1)[0]
# return the correct winner using fuzzy perceived values
return self.always_correct(fuzzy_value1, fuzzy_value2)
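# Illustrative behaviour of the selectors above (comment only, not original code):
# always_correct(0.9, 0.5) -> WinningAnswer.answer1, deterministically
# correct_with_error(0.9, 0.5, 0.8) -> answer1 with probability 0.8
# closely_matched_errors(a, b, sigma) -> both grades are jittered with N(grade, sigma)
# noise before the winner is decided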
def _build_url(self, course_uuid, assignment_uuid, tail=""):
url = '/api/courses/' + course_uuid + '/assignments/' + assignment_uuid + '/comparisons' + tail
return url
def _build_comparison_submit(self, winner, draft=False):
submit = {
'comparison_criteria': [],
'draft': draft
}
for criterion in self.assignment.criteria:
submit['comparison_criteria'].append({
'criterion_id': criterion.uuid,
'winner': winner,
'content': None
})
return submit
def test_random_students_perform_comparisons(self):
self.student_comparison_count = {
student.id: 0 for student in self.students
}
comparison_count = 0
round_count = 0
r_value = None
while comparison_count < self.TOTAL_MAX_COMPARISONS:
# select a random student to perform a comparison
student = random.choice(self.students)
with self.login(student.username):
# perform selection algorithm
rv = self.client.get(self.base_url)
self.assert200(rv)
winner = self._decide_winner(rv.json['comparison']['answer1_id'], rv.json['comparison']['answer2_id'])
comparison_submit = self._build_comparison_submit(winner.value)
rv = self.client.post(self.base_url, data=json.dumps(comparison_submit), content_type='application/json')
self.assert200(rv)
comparison_count += 1
# remove students who have completed all comparisons
self.student_comparison_count[student.id] += 1
if self.student_comparison_count[student.id] >= self.MAX_COMPARSIONS:
indexes = [i for i, s in enumerate(self.students) if student.id == s.id]
del self.students[indexes[0]]
if comparison_count % self.COMPARISONS_IN_ROUND == 0:
round_count += 1
actual_grades = []
current_scores = []
for answer in self.answers:
answer_score = AnswerScore.query.filter_by(answer_id=answer.id).first()
if answer_score:
current_scores.append(answer_score.score)
actual_grades.append(self.grade_by_answer_uuid[answer.uuid])
r_value, pearsonr_p_value = pearsonr(actual_grades, current_scores)
if r_value >= self.ACCEPTABLE_CORRELATION:
break
self.assertGreaterEqual(r_value, self.ACCEPTABLE_CORRELATION)
self.assertLessEqual(round_count, self.TOTAL_MAX_ROUNDS) | gpl-3.0 | -8,371,443,802,166,845,000 | 40.009852 | 121 | 0.634431 | false |
PythonScanClient/PyScanClient | example/opi/scripts/xy_scan.py | 1 | 2047 | """
Schedule scan with parameters from BOY script
@author: Kay Kasemir
"""
from org.csstudio.scan.ui import SimulationDisplay
from org.csstudio.scan.server import SimulationResult
from org.eclipse.ui import PlatformUI
from errors import showException
from scan.commands.loop import Loop
from scan.commands.wait import Wait
from scan.commands.log import Log
from beamline_setup import scan_client
try:
# Fetch parameters from display
x0 = float(display.getWidget("x0").getValue())
x1 = float(display.getWidget("x1").getValue())
dx = float(display.getWidget("dx").getValue())
y0 = float(display.getWidget("y0").getValue())
y1 = float(display.getWidget("y1").getValue())
dy = float(display.getWidget("dy").getValue())
neutrons = float(display.getWidget("neutrons").getValue())
simu = str(display.getWidget("simu").getValue()) == "True"
if str(display.getWidget("updown").getValue()) == "True":
toggle = -1
else:
toggle = 1
#from org.eclipse.jface.dialogs import MessageDialog
#MessageDialog.openWarning(
# None, "Type", "Type is " + neutrons.__class__.__name__)
# Create scan
cmds =[
Loop('xpos', min(x0, x1), max(x0, x1), max(0.1, abs(dx)),
Loop('ypos', min(y0, y1), max(y0, y1), toggle * max(0.1, abs(dy)),
[
Wait('neutrons', neutrons, comparison='increase by'),
Log('xpos', 'ypos', 'readback')
]
)
)
]
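# Resulting scan (comment only): an outer sweep over xpos with an inner,
# optionally direction-alternating, sweep over ypos (when "updown" is selected,
# toggle = -1); at each point the scan waits until 'neutrons' has increased by
# the requested amount, then logs xpos, ypos and readback.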
if simu:
simulation = scan_client.simulate(cmds)
SimulationDisplay.show(SimulationResult(simulation['seconds'], simulation['simulation']))
else:
# Submit scan
id = scan_client.submit(cmds, "XY Scan")
workbench = PlatformUI.getWorkbench()
window = workbench.getActiveWorkbenchWindow()
page = window.getActivePage()
plot = page.showView("org.csstudio.scan.ui.plot.view")
plot.selectScan("XY Scan", id)
plot.selectDevices("xpos", "ypos")
except:
showException("XY Scan")
| epl-1.0 | -92,103,422,554,939,620 | 31.492063 | 97 | 0.634587 | false |
fiber-space/pip | pip/_vendor/cachecontrol/caches/file_cache.py | 1 | 4069 | import hashlib
import os
from textwrap import dedent
from ..cache import BaseCache
from ..controller import CacheController
try:
FileNotFoundError
except NameError:
# py2.X
FileNotFoundError = IOError
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
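# Minimal usage sketch (comment only; the path is an illustrative placeholder):
# with _secure_open_write('/tmp/cache-entry', 0o600) as fh:
# fh.write(b'cached bytes')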
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
try:
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
except ImportError:
notice = dedent("""
NOTE: In order to use the FileCache you must have
lockfile installed. You can install it via pip:
pip install lockfile
""")
raise ImportError(notice)
else:
if use_dir_lock:
lock_class = MkdirLockFile
elif lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
try:
os.remove(name)
except FileNotFoundError:
pass
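# Example round-trip (comment only; directory and URL are placeholders):
# cache = FileCache('.webcache')
# cache.set('http://example.com', b'payload')
# cache.get('http://example.com') -> b'payload'
# cache.delete('http://example.com')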
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
| mit | -2,711,897,813,747,140,600 | 29.593985 | 79 | 0.606783 | false |
ryepdx/account_payment_cim_authdotnet | xml2dic.py | 1 | 1080 | ##Module that converts the Xml response to dictionary
from lxml import etree
import re
import sys
def dictlist(node):
res = {}
node_tag = re.findall(r'}(\w*)', node.tag)
node_tag = node_tag[0]
res[node_tag] = []
xmltodict(node, res[node_tag])
reply = {}
reply[node_tag] = res[node_tag]
return reply
def xmltodict(node, res):
rep = {}
node_tag = re.findall(r'}(\w*)', node.tag)
node_tag = node_tag[0]
if len(node):
for n in list(node):
rep[node_tag] = []
value = xmltodict(n, rep[node_tag])
if len(n):
n_tag = re.findall(r'}(\w*)', n.tag)
n_tag = n_tag[0]
value = rep[node_tag]
res.append({n_tag:value})
else :
res.append(rep[node_tag][0])
else:
value = {}
value = node.text
res.append({node_tag:value})
return
def main(xml_string):
tree = etree.fromstring(xml_string)
res = dictlist(tree)
return res
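# Worked example (comment only; the namespace and values are placeholders):
# main('<r:reply xmlns:r="urn:x"><r:code>1</r:code></r:reply>')
# -> {'reply': [{'code': '1'}]}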
if __name__ == '__main__':
# read the XML payload from stdin
main(sys.stdin.read())
| agpl-3.0 | -254,882,252,893,235,300 | 21.5 | 53 | 0.503704 | false |
schaabs/sandbox | net/sandbox.keyvault/python/repl/key_vault_crypto.py | 1 | 1939 | import base64
import datetime
import sys
import argparse
from azure.keyvault.generated.models import KeyVaultErrorException
from python.key_vault_agent import KeyVaultAgent
from azure.keyvault.generated import KeyVaultClient
CLIENT_ID = '8fd4d3c4-efea-49aa-b1de-2c33c22da56e'
class KeyVaultCryptoAgent(KeyVaultAgent):
def __init__(self, client_id):
self._initialize(client_id)
def encrypt(self, f_in, f_out, vault_name, key_name, key_version=None):
vault = self.get_vault(vault_name)
buff = f_in.read()
buff = base64.encodebytes(buff)
buff = buff.replace(b'\n', b'')
try:
buff = self.data_client.encrypt(vault.properties.vault_uri, key_name, key_version or '', 'RSA1_5', buff)
except KeyVaultErrorException as e:
print(str(e))
buff = base64.decodebytes(buff)
f_out.write(buff)
def _parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['encrypt', 'decrypt'], help='specifies whether to encrypt or decrypt the specified "in" file')
parser.add_argument('infile', type=argparse.FileType('rb'), help='specifies the file on which to preform the crypto action')
parser.add_argument('outfile', type=argparse.FileType('wb'), help='specifies the file in which to store the crypto action result')
parser.add_argument('vault', help='the key to use for the crypto action')
parser.add_argument('key', help='the key to use for the crypto action')
return parser.parse_args(argv)
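# Example invocation (comment only; file, vault and key names are placeholders):
# python key_vault_crypto.py encrypt plain.txt cipher.bin myvault mykey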
def main(argv):
args = _parse_args(argv[1:])
crypto_agent = KeyVaultCryptoAgent(CLIENT_ID)
if args.action == 'encrypt':
crypto_agent.encrypt(args.infile, args.outfile, args.vault, args.key)
if __name__ == '__main__':
main(sys.argv)
| mit | 1,837,548,955,547,874,800 | 33.017544 | 137 | 0.680248 | false |
smartschat/cort | cort/coreference/experiments.py | 1 | 3556 | """ Manage learning from training data and making predictions on test data. """
import logging
__author__ = 'smartschat'
def learn(training_corpus, instance_extractor, perceptron):
""" Learn a model for coreference resolution from training data.
In particular, apply an instance/feature extractor to a training corpus and
employ a machine learning model to learn a weight vector from these
instances.
Args:
training_corpus (Corpus): The corpus to learn from.
instance_extractor (InstanceExtractor): The instance extractor that
defines the features and the structure of instances that are
extracted during training.
perceptron (Perceptron): A perceptron (including a decoder) that
learns from the instances extracted by ``instance_extractor``.
Returns:
A tuple consisting of
- **priors** (*dict(str,float)*): A prior weight for each label
in the graphs representing the instances,
- **weights** (*dict(str, array)*): A mapping of labels to weight
vectors. For each label ``l``, ``weights[l]`` contains weights
for each feature seen during training (for representing the
features we employ *feature hashing*). If the graphs employed are
not labeled, ``l`` is set to "+".
"""
logging.info("Learning.")
logging.info("\tExtracting instances and features.")
substructures, arc_information = instance_extractor.extract(
training_corpus)
logging.info("\tFitting model parameters.")
perceptron.fit(substructures, arc_information)
return perceptron.get_model()
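# Typical call sequence (comment only; the concrete extractor and perceptron
# are constructed elsewhere in cort):
# priors, weights = learn(training_corpus, instance_extractor, perceptron)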
def predict(testing_corpus,
instance_extractor,
perceptron,
coref_extractor):
""" According to a learned model, predict coreference information.
Args:
testing_corpus (Corpus): The corpus to predict coreference on.
instance_extractor (InstanceExtracor): The instance extracor that
defines the features and the structure of instances that are
extracted during testing.
perceptron (Perceptron): A perceptron (including a decoder) learned
from training data.
coref_extractor (function): An extractor for consolidating pairwise
predictions into coreference clusters.
Returns:
A tuple containing two dicts. The components are
- **mention_entity_mapping** (*dict(Mention, int)*): A mapping of
mentions to entity identifiers.
- **antecedent_mapping** (*dict(Mention, Mention)*): A mapping of
mentions to their antecedent (as determined by the
``coref_extractor``).
"""
logging.info("Predicting.")
logging.info("\tRemoving coreference annotations from corpus.")
for doc in testing_corpus:
doc.antecedent_decisions = {}
for mention in doc.system_mentions:
mention.attributes["antecedent"] = None
mention.attributes["set_id"] = None
logging.info("\tExtracting instances and features.")
substructures, arc_information = instance_extractor.extract(testing_corpus)
logging.info("\tDoing predictions.")
arcs, labels, scores = perceptron.predict(substructures, arc_information)
logging.info("\tClustering results.")
return coref_extractor(arcs, labels, scores, perceptron.get_coref_labels())
| mit | -2,677,525,820,870,736,000 | 38.076923 | 79 | 0.66676 | false |
Catgroove/dotaninja | app/filters.py | 1 | 1483 | from app import app
from flask import url_for
from .models import Player, DoesNotExist
from .helpers import json_file_to_dict
from config import JSON_DIR
import datetime
import arrow
import os
@app.template_filter("game_mode")
def game_mode(mode_id):
return json_file_to_dict(os.path.join(JSON_DIR, "game_mode.json"))[str(mode_id)]["name"]
@app.template_filter("region")
def region(cluster):
regions = json_file_to_dict(os.path.join(JSON_DIR, "regions.json"))["regions"]
for region, values in regions.items():
if values.get("clusters") and str(cluster) in values.get("clusters"):
return (values["display_name"][len("#dota_region_"):].capitalize())
@app.template_filter("duration")
def duration(duration):
return str(datetime.timedelta(seconds=duration))
@app.template_filter("time_since")
def time_since(time):
return arrow.get(time).humanize()
@app.template_filter("result")
def result(result):
if result:
return "Won"
return "Lost"
@app.template_filter("hero_image")
def hero_image(hero_id):
return url_for("static", filename="assets/heroes/{}_sb.png".format(hero_id))
@app.template_filter("item_image")
def item_image(item_id):
return url_for("static", filename="assets/items/{}_lg.png".format(item_id))
@app.template_filter("player_name")
def player_name(account_id):
try:
return Player.get(Player.account_id == account_id).personaname
except DoesNotExist:
return account_id
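# Jinja2 template usage of the filters above (comment only):
# {{ match.duration|duration }} -> e.g. "0:45:12"
# {{ match.start_time|time_since }} -> e.g. "2 hours ago"
# {{ player.account_id|player_name }} -> persona name, or the id if unknown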
| gpl-3.0 | 3,359,745,822,291,278,000 | 25.482143 | 92 | 0.696561 | false |
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/lib/flashwin_old.py | 1 | 19484 | #----------------------------------------------------------------------
# Name: wx.lib.flashwin
# Purpose: A class that allows the use of the Shockwave Flash
# ActiveX control
#
# Author: Robin Dunn
#
# Created: 22-March-2004
# RCS-ID: $Id: flashwin.py 26301 2004-03-23 05:29:50Z RD $
# Copyright: (c) 2004 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# This module was generated by the wx.activex.GernerateAXModule class
# (See also the genaxmodule script.)
import wx
import wx.activex
clsID = '{D27CDB6E-AE6D-11CF-96B8-444553540000}'
progID = 'ShockwaveFlash.ShockwaveFlash.1'
# Create eventTypes and event binders
wxEVT_ReadyStateChange = wx.activex.RegisterActiveXEvent('OnReadyStateChange')
wxEVT_Progress = wx.activex.RegisterActiveXEvent('OnProgress')
wxEVT_FSCommand = wx.activex.RegisterActiveXEvent('FSCommand')
EVT_ReadyStateChange = wx.PyEventBinder(wxEVT_ReadyStateChange, 1)
EVT_Progress = wx.PyEventBinder(wxEVT_Progress, 1)
EVT_FSCommand = wx.PyEventBinder(wxEVT_FSCommand, 1)
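# Typical event hookup (comment only, not part of the generated wrapper):
# flash = FlashWindow(parent)
# flash.Bind(EVT_FSCommand, on_fscommand)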
# Derive a new class from ActiveXWindow
class FlashWindow(wx.activex.ActiveXWindow):
def __init__(self, parent, ID=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, name='FlashWindow'):
wx.activex.ActiveXWindow.__init__(self, parent,
wx.activex.CLSID('{D27CDB6E-AE6D-11CF-96B8-444553540000}'),
ID, pos, size, style, name)
# Methods exported by the ActiveX object
def QueryInterface(self, riid):
return self.CallAXMethod('QueryInterface', riid)
def AddRef(self):
return self.CallAXMethod('AddRef')
def Release(self):
return self.CallAXMethod('Release')
def GetTypeInfoCount(self):
return self.CallAXMethod('GetTypeInfoCount')
def GetTypeInfo(self, itinfo, lcid):
return self.CallAXMethod('GetTypeInfo', itinfo, lcid)
def GetIDsOfNames(self, riid, rgszNames, cNames, lcid):
return self.CallAXMethod('GetIDsOfNames', riid, rgszNames, cNames, lcid)
def Invoke(self, dispidMember, riid, lcid, wFlags, pdispparams):
return self.CallAXMethod('Invoke', dispidMember, riid, lcid, wFlags, pdispparams)
def SetZoomRect(self, left, top, right, bottom):
return self.CallAXMethod('SetZoomRect', left, top, right, bottom)
def Zoom(self, factor):
return self.CallAXMethod('Zoom', factor)
def Pan(self, x, y, mode):
return self.CallAXMethod('Pan', x, y, mode)
def Play(self):
return self.CallAXMethod('Play')
def Stop(self):
return self.CallAXMethod('Stop')
def Back(self):
return self.CallAXMethod('Back')
def Forward(self):
return self.CallAXMethod('Forward')
def Rewind(self):
return self.CallAXMethod('Rewind')
def StopPlay(self):
return self.CallAXMethod('StopPlay')
def GotoFrame(self, FrameNum):
return self.CallAXMethod('GotoFrame', FrameNum)
def CurrentFrame(self):
return self.CallAXMethod('CurrentFrame')
def IsPlaying(self):
return self.CallAXMethod('IsPlaying')
def PercentLoaded(self):
return self.CallAXMethod('PercentLoaded')
def FrameLoaded(self, FrameNum):
return self.CallAXMethod('FrameLoaded', FrameNum)
def FlashVersion(self):
return self.CallAXMethod('FlashVersion')
def LoadMovie(self, layer, url):
return self.CallAXMethod('LoadMovie', layer, url)
def TGotoFrame(self, target, FrameNum):
return self.CallAXMethod('TGotoFrame', target, FrameNum)
def TGotoLabel(self, target, label):
return self.CallAXMethod('TGotoLabel', target, label)
def TCurrentFrame(self, target):
return self.CallAXMethod('TCurrentFrame', target)
def TCurrentLabel(self, target):
return self.CallAXMethod('TCurrentLabel', target)
def TPlay(self, target):
return self.CallAXMethod('TPlay', target)
def TStopPlay(self, target):
return self.CallAXMethod('TStopPlay', target)
def SetVariable(self, name, value):
return self.CallAXMethod('SetVariable', name, value)
def GetVariable(self, name):
return self.CallAXMethod('GetVariable', name)
def TSetProperty(self, target, property, value):
return self.CallAXMethod('TSetProperty', target, property, value)
def TGetProperty(self, target, property):
return self.CallAXMethod('TGetProperty', target, property)
def TCallFrame(self, target, FrameNum):
return self.CallAXMethod('TCallFrame', target, FrameNum)
def TCallLabel(self, target, label):
return self.CallAXMethod('TCallLabel', target, label)
def TSetPropertyNum(self, target, property, value):
return self.CallAXMethod('TSetPropertyNum', target, property, value)
def TGetPropertyNum(self, target, property):
return self.CallAXMethod('TGetPropertyNum', target, property)
def TGetPropertyAsNumber(self, target, property):
return self.CallAXMethod('TGetPropertyAsNumber', target, property)
# Getters, Setters and properties
def _get_ReadyState(self):
return self.GetAXProp('ReadyState')
readystate = property(_get_ReadyState, None)
def _get_TotalFrames(self):
return self.GetAXProp('TotalFrames')
totalframes = property(_get_TotalFrames, None)
def _get_Playing(self):
return self.GetAXProp('Playing')
def _set_Playing(self, Playing):
self.SetAXProp('Playing', Playing)
playing = property(_get_Playing, _set_Playing)
def _get_Quality(self):
return self.GetAXProp('Quality')
def _set_Quality(self, Quality):
self.SetAXProp('Quality', Quality)
quality = property(_get_Quality, _set_Quality)
def _get_ScaleMode(self):
return self.GetAXProp('ScaleMode')
def _set_ScaleMode(self, ScaleMode):
self.SetAXProp('ScaleMode', ScaleMode)
scalemode = property(_get_ScaleMode, _set_ScaleMode)
def _get_AlignMode(self):
return self.GetAXProp('AlignMode')
def _set_AlignMode(self, AlignMode):
self.SetAXProp('AlignMode', AlignMode)
alignmode = property(_get_AlignMode, _set_AlignMode)
def _get_BackgroundColor(self):
return self.GetAXProp('BackgroundColor')
def _set_BackgroundColor(self, BackgroundColor):
self.SetAXProp('BackgroundColor', BackgroundColor)
backgroundcolor = property(_get_BackgroundColor, _set_BackgroundColor)
def _get_Loop(self):
return self.GetAXProp('Loop')
def _set_Loop(self, Loop):
self.SetAXProp('Loop', Loop)
loop = property(_get_Loop, _set_Loop)
def _get_Movie(self):
return self.GetAXProp('Movie')
def _set_Movie(self, Movie):
self.SetAXProp('Movie', Movie)
movie = property(_get_Movie, _set_Movie)
def _get_FrameNum(self):
return self.GetAXProp('FrameNum')
def _set_FrameNum(self, FrameNum):
self.SetAXProp('FrameNum', FrameNum)
framenum = property(_get_FrameNum, _set_FrameNum)
def _get_WMode(self):
return self.GetAXProp('WMode')
def _set_WMode(self, WMode):
self.SetAXProp('WMode', WMode)
wmode = property(_get_WMode, _set_WMode)
def _get_SAlign(self):
return self.GetAXProp('SAlign')
def _set_SAlign(self, SAlign):
self.SetAXProp('SAlign', SAlign)
salign = property(_get_SAlign, _set_SAlign)
def _get_Menu(self):
return self.GetAXProp('Menu')
def _set_Menu(self, Menu):
self.SetAXProp('Menu', Menu)
menu = property(_get_Menu, _set_Menu)
def _get_Base(self):
return self.GetAXProp('Base')
def _set_Base(self, Base):
self.SetAXProp('Base', Base)
base = property(_get_Base, _set_Base)
def _get_Scale(self):
return self.GetAXProp('Scale')
def _set_Scale(self, Scale):
self.SetAXProp('Scale', Scale)
scale = property(_get_Scale, _set_Scale)
def _get_DeviceFont(self):
return self.GetAXProp('DeviceFont')
def _set_DeviceFont(self, DeviceFont):
self.SetAXProp('DeviceFont', DeviceFont)
devicefont = property(_get_DeviceFont, _set_DeviceFont)
def _get_EmbedMovie(self):
return self.GetAXProp('EmbedMovie')
def _set_EmbedMovie(self, EmbedMovie):
self.SetAXProp('EmbedMovie', EmbedMovie)
embedmovie = property(_get_EmbedMovie, _set_EmbedMovie)
def _get_BGColor(self):
return self.GetAXProp('BGColor')
def _set_BGColor(self, BGColor):
self.SetAXProp('BGColor', BGColor)
bgcolor = property(_get_BGColor, _set_BGColor)
def _get_Quality2(self):
return self.GetAXProp('Quality2')
def _set_Quality2(self, Quality2):
self.SetAXProp('Quality2', Quality2)
quality2 = property(_get_Quality2, _set_Quality2)
def _get_SWRemote(self):
return self.GetAXProp('SWRemote')
def _set_SWRemote(self, SWRemote):
self.SetAXProp('SWRemote', SWRemote)
swremote = property(_get_SWRemote, _set_SWRemote)
def _get_FlashVars(self):
return self.GetAXProp('FlashVars')
def _set_FlashVars(self, FlashVars):
self.SetAXProp('FlashVars', FlashVars)
flashvars = property(_get_FlashVars, _set_FlashVars)
def _get_AllowScriptAccess(self):
return self.GetAXProp('AllowScriptAccess')
def _set_AllowScriptAccess(self, AllowScriptAccess):
self.SetAXProp('AllowScriptAccess', AllowScriptAccess)
allowscriptaccess = property(_get_AllowScriptAccess, _set_AllowScriptAccess)
def _get_MovieData(self):
return self.GetAXProp('MovieData')
def _set_MovieData(self, MovieData):
self.SetAXProp('MovieData', MovieData)
moviedata = property(_get_MovieData, _set_MovieData)
# PROPERTIES
# --------------------
# readystate
# type:int arg:VT_EMPTY canGet:True canSet:False
#
# totalframes
# type:int arg:VT_EMPTY canGet:True canSet:False
#
# playing
# type:bool arg:bool canGet:True canSet:True
#
# quality
# type:int arg:int canGet:True canSet:True
#
# scalemode
# type:int arg:int canGet:True canSet:True
#
# alignmode
# type:int arg:int canGet:True canSet:True
#
# backgroundcolor
# type:int arg:int canGet:True canSet:True
#
# loop
# type:bool arg:bool canGet:True canSet:True
#
# movie
# type:string arg:string canGet:True canSet:True
#
# framenum
# type:int arg:int canGet:True canSet:True
#
# wmode
# type:string arg:string canGet:True canSet:True
#
# salign
# type:string arg:string canGet:True canSet:True
#
# menu
# type:bool arg:bool canGet:True canSet:True
#
# base
# type:string arg:string canGet:True canSet:True
#
# scale
# type:string arg:string canGet:True canSet:True
#
# devicefont
# type:bool arg:bool canGet:True canSet:True
#
# embedmovie
# type:bool arg:bool canGet:True canSet:True
#
# bgcolor
# type:string arg:string canGet:True canSet:True
#
# quality2
# type:string arg:string canGet:True canSet:True
#
# swremote
# type:string arg:string canGet:True canSet:True
#
# flashvars
# type:string arg:string canGet:True canSet:True
#
# allowscriptaccess
# type:string arg:string canGet:True canSet:True
#
# moviedata
# type:string arg:string canGet:True canSet:True
#
#
#
#
# METHODS
# --------------------
# QueryInterface
# retType: VT_VOID
# params:
# riid
# in:True out:False optional:False type:unsupported type 29
# ppvObj
# in:False out:True optional:False type:unsupported type 26
#
# AddRef
# retType: int
#
# Release
# retType: int
#
# GetTypeInfoCount
# retType: VT_VOID
# params:
# pctinfo
# in:False out:True optional:False type:int
#
# GetTypeInfo
# retType: VT_VOID
# params:
# itinfo
# in:True out:False optional:False type:int
# lcid
# in:True out:False optional:False type:int
# pptinfo
# in:False out:True optional:False type:unsupported type 26
#
# GetIDsOfNames
# retType: VT_VOID
# params:
# riid
# in:True out:False optional:False type:unsupported type 29
# rgszNames
# in:True out:False optional:False type:unsupported type 26
# cNames
# in:True out:False optional:False type:int
# lcid
# in:True out:False optional:False type:int
# rgdispid
# in:False out:True optional:False type:int
#
# Invoke
# retType: VT_VOID
# params:
# dispidMember
# in:True out:False optional:False type:int
# riid
# in:True out:False optional:False type:unsupported type 29
# lcid
# in:True out:False optional:False type:int
# wFlags
# in:True out:False optional:False type:int
# pdispparams
# in:True out:False optional:False type:unsupported type 29
# pvarResult
# in:False out:True optional:False type:VT_VARIANT
# pexcepinfo
# in:False out:True optional:False type:unsupported type 29
# puArgErr
# in:False out:True optional:False type:int
#
# SetZoomRect
# retType: VT_VOID
# params:
# left
# in:True out:False optional:False type:int
# top
# in:True out:False optional:False type:int
# right
# in:True out:False optional:False type:int
# bottom
# in:True out:False optional:False type:int
#
# Zoom
# retType: VT_VOID
# params:
# factor
# in:True out:False optional:False type:int
#
# Pan
# retType: VT_VOID
# params:
# x
# in:True out:False optional:False type:int
# y
# in:True out:False optional:False type:int
# mode
# in:True out:False optional:False type:int
#
# Play
# retType: VT_VOID
#
# Stop
# retType: VT_VOID
#
# Back
# retType: VT_VOID
#
# Forward
# retType: VT_VOID
#
# Rewind
# retType: VT_VOID
#
# StopPlay
# retType: VT_VOID
#
# GotoFrame
# retType: VT_VOID
# params:
# FrameNum
# in:True out:False optional:False type:int
#
# CurrentFrame
# retType: int
#
# IsPlaying
# retType: bool
#
# PercentLoaded
# retType: int
#
# FrameLoaded
# retType: bool
# params:
# FrameNum
# in:True out:False optional:False type:int
#
# FlashVersion
# retType: int
#
# LoadMovie
# retType: VT_VOID
# params:
# layer
# in:True out:False optional:False type:int
# url
# in:True out:False optional:False type:string
#
# TGotoFrame
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# FrameNum
# in:True out:False optional:False type:int
#
# TGotoLabel
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# label
# in:True out:False optional:False type:string
#
# TCurrentFrame
# retType: int
# params:
# target
# in:True out:False optional:False type:string
#
# TCurrentLabel
# retType: string
# params:
# target
# in:True out:False optional:False type:string
#
# TPlay
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
#
# TStopPlay
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
#
# SetVariable
# retType: VT_VOID
# params:
# name
# in:True out:False optional:False type:string
# value
# in:True out:False optional:False type:string
#
# GetVariable
# retType: string
# params:
# name
# in:True out:False optional:False type:string
#
# TSetProperty
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
# value
# in:True out:False optional:False type:string
#
# TGetProperty
# retType: string
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
#
# TCallFrame
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# FrameNum
# in:True out:False optional:False type:int
#
# TCallLabel
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# label
# in:True out:False optional:False type:string
#
# TSetPropertyNum
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
# value
# in:True out:False optional:False type:double
#
# TGetPropertyNum
# retType: double
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
#
# TGetPropertyAsNumber
# retType: double
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
#
#
#
#
# EVENTS
# --------------------
# ReadyStateChange
# retType: VT_VOID
# params:
# newState
# in:False out:False optional:False type:int
#
# Progress
# retType: VT_VOID
# params:
# percentDone
# in:False out:False optional:False type:int
#
# FSCommand
# retType: VT_VOID
# params:
# command
# in:True out:False optional:False type:string
# args
# in:True out:False optional:False type:string
#
#
#
#
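# --------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the generated module). It assumes
# the Shockwave Flash ActiveX control is installed and that a movie file
# exists at the path below; both are illustrative assumptions.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title='FlashWindow demo')
    flash = FlashWindow(frame)                     # embed the ActiveX control
    flash.movie = 'file:///C:/path/to/movie.swf'   # assumed movie location
    frame.Show()
    app.MainLoop()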
| mit | -5,230,802,995,228,288,000 | 27.883436 | 89 | 0.582991 | false |
proversity-org/edx-platform | cms/djangoapps/contentstore/features/video.py | 1 | 2082 | # pylint: disable=missing-docstring
from lettuce import step, world
SELECTORS = {
'spinner': '.video-wrapper .spinner',
'controls': '.video-controls',
}
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@step('I have uploaded subtitles "([^"]*)"$')
def i_have_uploaded_subtitles(_step, sub_id):
_step.given('I go to the files and uploads page')
_step.given('I upload the test file "subs_{}.srt.sjson"'.format(sub_id.strip()))
@step('I have created a Video component$')
def i_created_a_video_component(step):
step.given('I am in Studio editing a new unit')
world.create_component_instance(
step=step,
category='video',
)
world.wait_for_xmodule()
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait(DELAY)
world.wait_for_invisible(SELECTORS['spinner'])
if not world.youtube.config.get('youtube_api_blocked'):
world.wait_for_visible(SELECTORS['controls'])
@step('I have created a Video component with subtitles$')
def i_created_a_video_with_subs(_step):
_step.given('I have created a Video component with subtitles "tPccVs9bg0c"')
@step('I have created a Video component with subtitles "([^"]*)"$')
def i_created_a_video_with_subs_with_name(_step, sub_id):
_step.given('I have created a Video component')
# Store the current URL so we can return here
video_url = world.browser.url
# Upload subtitles for the video using the upload interface
_step.given('I have uploaded subtitles "{}"'.format(sub_id))
# Return to the video
world.visit(video_url)
world.wait_for_xmodule()
# update .sub filed with proper subs name (which mimics real Studio/XML behavior)
# this is needed only for that videos which are created in acceptance tests.
_step.given('I edit the component')
world.wait_for_ajax_complete()
_step.given('I save changes')
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait_for_invisible(SELECTORS['spinner'])
| agpl-3.0 | -6,644,738,434,728,490,000 | 30.074627 | 85 | 0.686359 | false |
Mokona/python-p4lib | test/mocked/p4lib_describe_test.py | 1 | 3184 | import unittest
import p4lib
from mock23 import Mock
from test_utils import change_stdout, test_options, test_raw_result
CHANGE_NUM = 1234
USER = "someuser"
CLIENT = "someclient"
DATE = "2014/11/01"
DESCRIPTION = "Some changelist description"
FILE_0 = "//depot/file.cpp"
REV_0 = 3
ACTION_0 = "edit"
FILE_1 = "//depot/file2.cpp"
REV_1 = 4
ACTION_1 = "edit"
DESCRIBE_OUTPUT_BASE = """Change %i by %s@%s on %s
\t%s
Affected files ...
... %s#%i %s
... %s#%i %s
""" % (CHANGE_NUM, USER, CLIENT, DATE,
DESCRIPTION,
FILE_0, REV_0, ACTION_0,
FILE_1, REV_1, ACTION_1)
DESCRIBE_OUTPUT = DESCRIBE_OUTPUT_BASE + """
"""
DESCRIBE_OUTPUT_LONG = DESCRIBE_OUTPUT_BASE + """
Differences ...
==== //depot/apps/px/ReadMe.txt#5 (text/utf8) ====
DiffLine1
"""
DESCRIBE_OUTPUT_MOVE_DELETE = DESCRIBE_OUTPUT_BASE + """
Moved files ...
... //depot/file1.cpp#1 moved from ... //depot/file2.cpp#1
Differences ...
"""
class DescribeTestCase(unittest.TestCase):
def setUp(self):
p4lib._run = Mock(spec='p4lib._run', return_value=("", "", 0))
def _common_asserts(self, result):
self.assertEqual(CHANGE_NUM, result["change"])
self.assertEqual(DESCRIPTION, result["description"])
self.assertEqual(USER, result["user"])
self.assertEqual(CLIENT, result["client"])
self.assertIn("files", result)
files = result["files"]
self.assertEqual(2, len(files))
file_0 = files[0]
self.assertEqual(FILE_0, file_0["depotFile"])
self.assertEqual(REV_0, file_0["rev"])
self.assertEqual(ACTION_0, file_0["action"])
file_1 = files[1]
self.assertEqual(FILE_1, file_1["depotFile"])
self.assertEqual(REV_1, file_1["rev"])
self.assertEqual(ACTION_1, file_1["action"])
def test_with_change_short_form(self):
change_stdout(DESCRIBE_OUTPUT)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM, shortForm=True)
p4lib._run.assert_called_with(['p4', 'describe', '-s', '1234'])
self._common_asserts(result)
self.assertNotIn("diff", result)
def test_with_change_long_form(self):
change_stdout(DESCRIBE_OUTPUT_LONG)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM)
p4lib._run.assert_called_with(['p4', 'describe', '1234'])
self._common_asserts(result)
self.assertIn("diff", result)
def test_with_change_long_form_with_move_delete(self):
change_stdout(DESCRIBE_OUTPUT_MOVE_DELETE)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM)
p4lib._run.assert_called_with(['p4', 'describe', '1234'])
self._common_asserts(result)
self.assertIn("diff", result)
def test_raw_result(self):
test_raw_result(self, DESCRIBE_OUTPUT_LONG, "describe",
change=CHANGE_NUM)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM)
self._common_asserts(result)
def test_with_options(self):
change_stdout(DESCRIBE_OUTPUT_LONG)
test_options(self, "describe", change=CHANGE_NUM,
expected=["describe", "1234"])
| mit | -1,705,442,480,516,141,300 | 24.886179 | 71 | 0.611809 | false |
phillynch7/sportsref | sportsref/nba/seasons.py | 1 | 9902 | from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range, zip
from past.utils import old_div
import urllib.parse
import future
import future.utils
import numpy as np
import pandas as pd
from pyquery import PyQuery as pq
import sportsref
class Season(future.utils.with_metaclass(sportsref.decorators.Cached, object)):
"""Object representing a given NBA season."""
def __init__(self, year):
"""Initializes a Season object for an NBA season.
:year: The year of the season we want.
"""
self.yr = int(year)
def __eq__(self, other):
return (self.yr == other.yr)
def __hash__(self):
return hash(self.yr)
def __repr__(self):
return 'Season({})'.format(self.yr)
def _subpage_url(self, page):
return (sportsref.nba.BASE_URL +
'/leagues/NBA_{}_{}.html'.format(self.yr, page))
@sportsref.decorators.memoize
def get_main_doc(self):
"""Returns PyQuery object for the main season URL.
:returns: PyQuery object.
"""
url = (sportsref.nba.BASE_URL +
'/leagues/NBA_{}.html'.format(self.yr))
return pq(sportsref.utils.get_html(url))
@sportsref.decorators.memoize
def get_sub_doc(self, subpage):
"""Returns PyQuery object for a given subpage URL.
:subpage: The subpage of the season, e.g. 'per_game'.
:returns: PyQuery object.
"""
html = sportsref.utils.get_html(self._subpage_url(subpage))
return pq(html)
@sportsref.decorators.memoize
def get_team_ids(self):
"""Returns a list of the team IDs for the given year.
:returns: List of team IDs.
"""
df = self.team_stats_per_game()
if not df.empty:
return df.index.tolist()
else:
print('ERROR: no teams found')
return []
@sportsref.decorators.memoize
def team_ids_to_names(self):
"""Mapping from 3-letter team IDs to full team names.
:returns: Dictionary with team IDs as keys and full team strings as
values.
"""
doc = self.get_main_doc()
table = doc('table#team-stats-per_game')
flattened = sportsref.utils.parse_table(table, flatten=True)
unflattened = sportsref.utils.parse_table(table, flatten=False)
team_ids = flattened['team_id']
team_names = unflattened['team_name']
if len(team_names) != len(team_ids):
raise Exception("team names and team IDs don't align")
return dict(zip(team_ids, team_names))
@sportsref.decorators.memoize
def team_names_to_ids(self):
"""Mapping from full team names to 3-letter team IDs.
:returns: Dictionary with tean names as keys and team IDs as values.
"""
d = self.team_ids_to_names()
return {v: k for k, v in d.items()}
@sportsref.decorators.memoize
@sportsref.decorators.kind_rpb(include_type=True)
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games]
def finals_winner(self):
"""Returns the team ID for the winner of that year's NBA Finals.
:returns: 3-letter team ID for champ.
"""
raise NotImplementedError('nba.Season.finals_winner')
def finals_loser(self):
"""Returns the team ID for the loser of that year's NBA Finals.
:returns: 3-letter team ID for runner-up.
"""
raise NotImplementedError('nba.Season.finals_loser')
def standings(self):
"""Returns a DataFrame containing standings information."""
doc = self.get_sub_doc('standings')
east_table = doc('table#divs_standings_E')
east_df = pd.DataFrame(sportsref.utils.parse_table(east_table))
east_df.sort_values('wins', ascending=False, inplace=True)
east_df['seed'] = range(1, len(east_df) + 1)
east_df['conference'] = 'E'
west_table = doc('table#divs_standings_W')
west_df = sportsref.utils.parse_table(west_table)
west_df.sort_values('wins', ascending=False, inplace=True)
west_df['seed'] = range(1, len(west_df) + 1)
west_df['conference'] = 'W'
full_df = pd.concat([east_df, west_df], axis=0).reset_index(drop=True)
full_df['team_id'] = full_df.team_id.str.extract(r'(\w+)\W*\(\d+\)', expand=False)
full_df['gb'] = [gb if isinstance(gb, int) or isinstance(gb, float) else 0
for gb in full_df['gb']]
full_df = full_df.drop('has_class_full_table', axis=1)
expanded_table = doc('table#expanded_standings')
expanded_df = sportsref.utils.parse_table(expanded_table)
full_df = pd.merge(full_df, expanded_df, on='team_id')
return full_df
@sportsref.decorators.memoize
def _get_team_stats_table(self, selector):
"""Helper function for stats tables on season pages. Returns a
DataFrame."""
doc = self.get_main_doc()
table = doc(selector)
df = sportsref.utils.parse_table(table)
df.set_index('team_id', inplace=True)
return df
def team_stats_per_game(self):
"""Returns a Pandas DataFrame of each team's basic per-game stats for
the season."""
return self._get_team_stats_table('table#team-stats-per_game')
def opp_stats_per_game(self):
"""Returns a Pandas DataFrame of each team's opponent's basic per-game
stats for the season."""
return self._get_team_stats_table('table#opponent-stats-per_game')
def team_stats_totals(self):
"""Returns a Pandas DataFrame of each team's basic stat totals for the
season."""
return self._get_team_stats_table('table#team-stats-base')
def opp_stats_totals(self):
"""Returns a Pandas DataFrame of each team's opponent's basic stat
totals for the season."""
return self._get_team_stats_table('table#opponent-stats-base')
def misc_stats(self):
"""Returns a Pandas DataFrame of miscellaneous stats about each team's
season."""
return self._get_team_stats_table('table#misc_stats')
def team_stats_shooting(self):
"""Returns a Pandas DataFrame of each team's shooting stats for the
season."""
return self._get_team_stats_table('table#team_shooting')
def opp_stats_shooting(self):
"""Returns a Pandas DataFrame of each team's opponent's shooting stats
for the season."""
return self._get_team_stats_table('table#opponent_shooting')
@sportsref.decorators.memoize
def _get_player_stats_table(self, identifier):
"""Helper function for player season stats.
:identifier: string identifying the type of stat, e.g. 'per_game'.
:returns: A DataFrame of stats.
"""
doc = self.get_sub_doc(identifier)
table = doc('table#{}_stats'.format(identifier))
df = sportsref.utils.parse_table(table)
return df
def player_stats_per_game(self):
"""Returns a DataFrame of per-game player stats for a season."""
return self._get_player_stats_table('per_game')
def player_stats_totals(self):
"""Returns a DataFrame of player stat totals for a season."""
return self._get_player_stats_table('totals')
def player_stats_per36(self):
"""Returns a DataFrame of player per-36 min stats for a season."""
return self._get_player_stats_table('per_minute')
def player_stats_per100(self):
"""Returns a DataFrame of player per-100 poss stats for a season."""
return self._get_player_stats_table('per_poss')
def player_stats_advanced(self):
"""Returns a DataFrame of player per-100 poss stats for a season."""
return self._get_player_stats_table('advanced')
def mvp_voting(self):
"""Returns a DataFrame containing information about MVP voting."""
raise NotImplementedError('nba.Season.mvp_voting')
def roy_voting(self):
"""Returns a DataFrame containing information about ROY voting."""
url = '{}/awards/awards_{}.html'.format(sportsref.nba.BASE_URL, self.yr)
doc = pq(sportsref.utils.get_html(url))
table = doc('table#roy')
df = sportsref.utils.parse_table(table)
return df
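# Hypothetical usage sketch (not part of the original module); the year is an
# arbitrary example and every call below fetches pages from
# basketball-reference.com, so treat this as illustrative only.
if __name__ == '__main__':
    season = Season(2017)
    print(season.team_ids_to_names())          # e.g. {'BOS': 'Boston Celtics', ...}
    print(season.player_stats_per_game().head())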
| gpl-3.0 | 5,210,937,238,460,397,000 | 35.538745 | 90 | 0.608362 | false |
Andr3iC/courtlistener | cl/simple_pages/urls.py | 1 | 4018 | from django.conf.urls import url
from django.views.generic import RedirectView
from cl.simple_pages.sitemap import sitemap_maker
from cl.simple_pages.views import (
tools_page, validate_for_google, validate_for_google2, validate_for_wot,
validate_for_bing, robots, advanced_search, contact_thanks, contact, feeds,
coverage_graph, faq, about, browser_warning, serve_static_file, old_terms,
latest_terms, contribute, markdown_help, humans,
)
mime_types = ('pdf', 'wpd', 'txt', 'doc', 'html', 'mp3')
urlpatterns = [
# Footer stuff
url(r'^about/$', about, name='about'),
url(r'^faq/$', faq, name="faq"),
url(r'^coverage/$', coverage_graph, name='coverage'),
url(r'^feeds/$', feeds, name='feeds_info'),
url(r'^contribute/$', contribute, name='contribute'),
url(r'^contact/$', contact, name="contact"),
url(r'^contact/thanks/$', contact_thanks, name='contact_thanks'),
url(r'^help/markdown/$', markdown_help, name="markdown_help"),
# Serve a static file
url(r'^(?P<file_path>(?:' + "|".join(mime_types) + ')/.*)$',
serve_static_file),
# Advanced search page
url(
r'^search/advanced-techniques/$',
advanced_search,
name='advanced_search'
),
url(r'^terms/v/(\d{1,2})/$', old_terms, name='old_terms'),
url(r'^terms/$', latest_terms, name='terms'),
# Randoms
url(
r'^tools/$',
tools_page,
name='tools',
),
url(
r'^bad-browser/$',
browser_warning,
name='bad_browser',
),
# Robots & Humans
url(
r'^robots\.txt$',
robots,
name='robots'
),
url(
r'^humans\.txt$',
humans,
name='humans',
),
# Sitemap:
url(r'^sitemap-simple-pages\.xml$', sitemap_maker),
# SEO-related stuff
url(r'^BingSiteAuth.xml$', validate_for_bing),
url(r'^googleef3d845637ccb353.html$', validate_for_google),
url(r'^google646349975c2495b6.html$', validate_for_google2),
url(r'^mywot8f5568174e171ff0acff.html$', validate_for_wot),
# Favicon, touch icons, etc.
url(r'^favicon\.ico$',
RedirectView.as_view(
url='/static/ico/favicon.ico',
permanent=True)),
url(r'^touch-icon-192x192\.png',
RedirectView.as_view(
url='/static/png/touch-icon-192x192.png',
permanent=True)),
url(r'^apple-touch-icon\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon.png',
permanent=True)),
url(r'^apple-touch-icon-72x72-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-72x72-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-76x76-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-76x76-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-114x114-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-114x114-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-120x120-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-120x120-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-144x144-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-144x144-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-152x152-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-152x152-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-180x180-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-180x180-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-precomposed.png',
permanent=True)),
]
| agpl-3.0 | 1,652,597,468,934,639,900 | 33.050847 | 79 | 0.599552 | false |
jdahlin/d-feet | dfeet/_ui/busnamebox.py | 1 | 2042 | import gobject
import gtk
from dfeet.dbus_introspector import BusWatch
from busnameview import BusNameView
class BusNameBox(gtk.VBox):
__gsignals__ = {
'busname-selected' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
}
def __init__(self, watch):
super(BusNameBox, self).__init__()
self.tree_view = BusNameView(watch)
self.tree_view.connect('cursor_changed', self.busname_selected_cb)
scroll = gtk.ScrolledWindow()
scroll.add(self.tree_view)
self.pack_start(scroll, True, True)
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.show_all()
def _completion_match_func(self, completion, key, iter):
print completion, key, iter
return self.tree_view._is_iter_equal(completion.get_model(),
iter, key)
def get_selected_busname(self):
(model, iter) = self.tree_view.get_selection().get_selected()
if not iter:
return None
busname = model.get_value(iter, BusWatch.BUSNAME_OBJ_COL)
return busname
def busname_selected_cb(self, treeview):
busname = self.get_selected_busname()
self.emit('busname-selected', busname)
def set_filter_string(self, value):
self.tree_view.set_filter_string(value)
self.tree_view.refilter()
def set_hide_private(self, hide_private):
self.tree_view.set_hide_private(hide_private)
self.tree_view.refilter()
def set_sort_col(self, value):
if value == 'Common Name':
col = BusWatch.COMMON_NAME_COL
elif value == 'Unique Name':
col = BusWatch.UNIQUE_NAME_COL
elif value == 'Process Name':
col = BusWatch.PROCESS_NAME_COL
else:
raise Exception('Value "' + value + '" is not a valid sort value')
self.tree_view.set_sort_column(col)
#self.tree_view.sort_column_changed()
| gpl-2.0 | -5,044,364,324,568,237,000 | 30.415385 | 78 | 0.599902 | false |
JuanbingTeam/djangobbs | djangobbs/accounts/models.py | 1 | 6806 | #!/usr/bin/env python
#coding=utf-8
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.utils.translation import ugettext as _T
from djangobbs.addresses.models import Person
from djangobbs.accounts.config import LOGO_FOLDER
from cPickle import dumps
class UserProfile(models.Model):
    # Linked to django.contrib.auth.models.User by default; this is the standard UserProfile usage.
user = models.ForeignKey(User, unique=True)
    # The user's nickname. Since the username on User itself does not allow Chinese characters, this field serves as the user's real login name.
nickname = models.CharField(max_length=200, unique=True, db_index=True, blank=False)
    # The user's avatar, stored under the LOGO_FOLDER directory.
logo = models.FileField(upload_to=LOGO_FOLDER, blank=True, default="")
    # The user's private information; the user may leave it blank.
personal_data = models.ForeignKey(Person, null=True, db_index=True, blank=True, default="")
    # The user's extra data entries.
extradata = models.ManyToManyField('accounts.ExtraProfileEntry', through='ExtraUserData')
def __unicode__(self):
return self.nickname
admin.site.register(UserProfile)
class ExtraUserData(models.Model):
"""此表真正保存用户的附加数据"""
    # The user this record belongs to.
user = models.ForeignKey(UserProfile)
    # The corresponding entry.
entry = models.ForeignKey('accounts.ExtraProfileEntry')
    # The recorded content.
content = models.TextField(blank=True, default="")
    # The time of the record.
time = models.DateTimeField(auto_now=True)
def __unicode__(self):
return unicode(self.user) + u'.' + unicode(self.entry) + u'@' + unicode(self.time) + u'.' + self.content
admin.site.register(ExtraUserData)
EXTERNAL_ENTRY_COLLECT_SOURCE = (
    ('U', _T('By User')),        # filled in by the user
    ('?', _T('Undefined')),      # reserved; each application decides its own usage
    ('M', _T('request.META')),   # read from request.META
    ('G', _T('request.GET')),    # read from request.GET
    ('P', _T('request.POST')),   # read from request.POST
    ('R', _T('request.REQUEST')),# read from request.REQUEST
    ('C', _T('request.COOKIE')), # read from request.COOKIES
    ('s', _T('request.session')),# read from request.session
    ('F', _T('request.FILES')),  # read from request.FILES
)
EXTERNAL_ENTRY_COLLECT_TIME = (
    ('R', _T('At register')),    # required at registration; cannot be changed afterwards
    ('M', _T('Manual')),         # filled at registration; the user may change it manually later
    ('I', _T('At login')),       # recorded automatically at login
    ('O', _T('At logout')),      # recorded automatically at logout
    ('A', _T('At all request')), # recorded on every request
    ('?', _T('Undefined')),      # reserved; each application decides its own usage
)
class ExtraProfileEntry(models.Model):
"""This model records all extra user information required by the bbs system."""
    # A name for this entry, e.g. 'login IP' or 'access time'.
    name = models.CharField(max_length=100, unique=True, db_index=True)
    # How the data is collected: automatically from the request, or provided by
    # the user. Defaults to manual user input. See EXTERNAL_ENTRY_COLLECT_SOURCE.
    source = models.CharField(max_length=1, default='U', choices=EXTERNAL_ENTRY_COLLECT_SOURCE)
    # When the data is collected: at every login, on every request, or something
    # else. See EXTERNAL_ENTRY_COLLECT_TIME.
    time = models.CharField(max_length=1, default='M', choices=EXTERNAL_ENTRY_COLLECT_TIME)
    # Used to validate the data; the exact usage is not settled yet. Possibly a
    # regular expression, or something else.
    type = models.TextField(blank=True, default='')
    # Number of duplicate records allowed; by default only one record is kept
    # per user per entry.
    dupli = models.PositiveIntegerField(null=True, default=1)
    # The key to read when fetching automatically from the request dict; if '*',
    # the whole dict is recorded (serialized with pickle).
    keyword = models.TextField(blank=True, default="")
def __unicode__(self):
return self.name
def push(self, user, data):
"""保存额外的用户数据"""
record = ExtraUserData()
record.user = user
record.entry = self
record.content = data
record.save()
if self.dupli != None:
objs = ExtraUserData.objects.filter(user=user).filter(entry=self)
if objs.count() > self.dupli:
obj = objs.order_by('time')[0] # order by time. the 1st is the oldest record.
obj.delete()
def get_request_data(self, request):
"""该函数从request里取出需要保存的数据"""
dict = None
if self.source == 'M':
dict = request.META
elif self.source == 'G':
dict = request.GET
elif self.source == 'P':
dict = request.POST
elif self.source == 'R':
dict = request.REQUEST
elif self.source == 'C':
dict = request.COOKIE
elif self.source == 's':
dict = request.session
elif self.source == 'F':
dict = request.FILES
else:
dict = None
if dict != None:
if self.keyword == '*':
return dumps(dict)
elif dict.has_key(self.keyword):
return dict.get(self.keyword)
return ""
admin.site.register(ExtraProfileEntry)
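# Hypothetical usage sketch (not part of the original module): pushing a
# configured entry for a user from within a view. The view name and the
# 'login IP' entry are illustrative assumptions.
#
#   def record_login_ip(request):
#       profile = UserProfile.objects.get(user=request.user)
#       entry = ExtraProfileEntry.objects.get(name='login IP')
#       entry.push(profile, entry.get_request_data(request))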
"""以下是初始化数据内容的"""
try:
superuser = User.objects.get(id=1)
UserProfile.objects.get(user=superuser)
except UserProfile.DoesNotExist:
"""以下代码试图为第一个超级用户初始化一个UserProfile"""
profile = UserProfile()
profile.user = superuser
profile.nickname = superuser.username
profile.personal_data = None
profile.save()
except Exception, error:
pass
try:
if ExtraProfileEntry.objects.all().count() == 0:
"""以下代码使得系统默认记录用户登录的最后10个IP,在models被import时执行。"""
entry = ExtraProfileEntry()
        entry.name = 'login IP'       # record the IP used at login
        entry.source = 'M'            # read from request.META
        entry.time = 'I'              # record at every login
        entry.type = "IPAddressField"
        entry.dupli = 10              # keep the 10 most recent IPs
        entry.keyword = 'REMOTE_ADDR' # i.e. request.META['REMOTE_ADDR']
entry.save()
except Exception, error:
pass
| apache-2.0 | 233,650,022,630,310,980 | 31.511628 | 112 | 0.596808 | false |
hammerhorn/hammerhorn-jive | dice/dice.py | 1 | 5645 | #!/usr/bin/env python
#coding=utf-8
"""
die-rolling simulator
Roll a single n-sided die. Number of sides can be specified on the
command line; default is 6.
"""
__author__ = 'Chris Horn <[email protected]>'
import argparse, sys
try:
if sys.version_info.major == 2:
import Tkinter as tk
elif sys.version_info.major == 3:
import tkinter as tk
except ImportError:
sys.exit('Tk could not be loaded. Ending program.')
from cjh.cli import Cli
from cjh.config import Config
from cjh.die import Die
################
# PROCEDURES #
################
def _parse_args():
"""
Parse Args
"""
parser = argparse.ArgumentParser(
description='Variable-sided die simulator.')
parser.add_argument(
'-q', '--quiet', help='suppress ascii art', action='count')
parser.add_argument(
'-d', '--sides', type=int, help='number of sides on the current die')
parser.add_argument(
'-a', '--anim', action='store_true',
help='animated effect (command-line only)')
parser.add_argument(
'-s', '--shell', type=str, help='bash, dialog, sh, Tk, zenity')
return parser.parse_args()
def roll_and_output():
"""
Roll die and show result
"""
global _toggle
if SHELL.interface == 'Tk':
SHELL.msg.config(font=('mono', 10, 'bold'))
die.roll()
if ARGS.quiet > 2:
sys.exit(0)
elif ARGS.quiet == 2:
SHELL.output(die.value)
elif ARGS.quiet == 1:
SHELL.output(die.__str__())
elif SHELL.interface == 'Tk':
_toggle = not _toggle # make this into a generator
if _toggle is True:
SHELL.msg.config(fg='#FF00FF')#, bg='black')
else: SHELL.msg.config(fg='chartreuse')#, bg='black')
SHELL.msgtxt.set(die.draw_face(verbose=True, get_str=True))
SHELL.main_window.title(die)
else:
die.draw_face(verbose=True)
##########
# DATA #
##########
if __name__ == '__main__':
ARGS = _parse_args()
else: ARGS = None
CONFIG = Config()
if ARGS and ARGS.shell:
SHELL = CONFIG.launch_selected_shell(ARGS.shell)
else: SHELL = CONFIG.start_user_profile()
SHELL_NAME = SHELL.interface
lang_key = CONFIG.get_lang_key()
def change_lang(lang_code):
global lang_key, main_menu
lang_key = lang_code
button.config(text={'EN':'Roll', 'EO':'Ruligi'}[lang_key])
main_menu.destroy()
main_menu = tk.Menu(SHELL.main_window, tearoff=0)
lang_menu = tk.Menu(main_menu, tearoff=0)
lang_menu.add_command(label='English', command=lambda: change_lang('EN'))
lang_menu.add_command(label='Esperanto', command=lambda: change_lang('EO'))
main_menu.add_cascade(
label={'EN':'Language', 'EO':'Lingvo'}[lang_key], menu=lang_menu)
main_menu.add_command(
label={'EN':'Exit', 'EO':'Eliri'}[lang_key],
command=SHELL.main_window.destroy)
SHELL.msg.config(
width=150, font=('mono', 12, 'bold'), bg='black', fg='white')
SHELL.msgtxt.set(
{'EN':'Click to roll.', 'EO':'Klaku por ruligi.'}[lang_key])
SHELL.main_window.title({'EN':'dice', 'EO':'ĵetkuboj'}[lang_key])
# Set up Tk window
if SHELL.interface == 'Tk':
if lang_key == 'EO':
SHELL.main_window.title('ĵetkuboj')
SHELL.main_window.config(bg='black')
SHELL.msg.config(font=('mono', 12, 'bold'), bg='black', fg='white')
SHELL.msgtxt.set(
{'EN':'Click to roll.', 'EO':'Klaku por ruligi.'}[lang_key])
button = tk.Button(
SHELL.main_window, text={'EN':"Roll", 'EO':'Ruligi'}[lang_key],
command=roll_and_output)
button.config(
underline=0, bg='black', fg='white', activeforeground='white',
activebackground='black', relief=tk.FLAT, highlightcolor='white')
button.pack(side='top')
button.focus_set()
main_menu = tk.Menu(SHELL.main_window, tearoff=0)
lang_menu = tk.Menu(main_menu, tearoff=0)
#english_checked = tk.IntVar()
#esperanto_checked = tk.IntVar()
    #english = tk.Checkbutton(lang_menu, text='English', variable=english_checked)
#esperanto = tk.Checkbutton(lang_menu, variable=esperanto_checked)
lang_menu.add_checkbutton(
label='English', command=lambda: change_lang('EN'))
lang_menu.add_checkbutton(
label='Esperanto', command=lambda: change_lang('EO'))
main_menu.add_cascade(
label={'EN':'Language', 'EO':'Lingvo'}[lang_key], menu=lang_menu)
main_menu.add_command(
label={'EN': 'Exit', 'EO': 'Eliri'}[lang_key],
command=SHELL.main_window.destroy)
# menu.add_command(label='English')
# menu.post(tk.event.x_root, tk.event.y_root)
def main_callback(event):
# SHELL.main_window.focus_set()
main_menu.tk_popup(event.x_root, event.y_root, 0)
# print "clicked at", event.x, event.y
#frame = Frame(root, width=100, height=100)
#frame.bind("<Key>", key)
if SHELL.interface == 'Tk': SHELL.main_window.bind('<Button-3>', main_callback)
#frame.pack()
if SHELL_NAME in ['wx', 'Tk']:
SHELL.center_window(width_=200, height_=200, x_offset=100)
if ARGS and ARGS.sides > 0:
die = Die(ARGS.sides)
else: die = Die()
_toggle = False
def main():
"""
In a text environment, roll one die with or without animation,
according to command-line flags. In Tk, run the main loop.
"""
if SHELL.interface == 'Tk':
SHELL.main_window.mainloop()
else:
while True:
if SHELL_NAME in ['bash', 'sh']:
if ARGS is not None and ARGS.anim:
die.animate()
else: roll_and_output()
Cli.wait()
Cli.clear(9)
if __name__ == '__main__':
main()
| gpl-2.0 | -2,858,508,236,669,075,000 | 29.836066 | 79 | 0.606238 | false |
csningli/MultiAgent | examples/simple_move/simple_move_sim.py | 1 | 2835 |
# MultiAgent
# (c) 2017-2019, NiL, [email protected]
import sys, random, datetime, math
random.seed(datetime.datetime.now())
sys.path.append("../..")
from mas.multiagent import *
AREA_SIZE = 200
POS_ERROR = 5
MIN_SPEED = 100
MAX_SPEED = 500
class TargetModule(Module) :
def __init__(self) :
super(TargetModule, self).__init__()
self.index = 0
self.targets = [
(100, 0), (100, 100),
(0, 100), (-100, 100),
(-100, 0), (-100, -100),
(0, -100), (100, -100),
]
def process(self) :
pos = self.mem.read("pos", None)
target = self.mem.read("target", None)
if pos is not None and (target is None or ppdist_l2(target, pos) <= POS_ERROR) :
self.mem.reg("target", self.targets[self.index])
self.index = (self.index + 1) % len(self.targets)
class SimpleMoveModule(ObjectModule) :
def act(self, resp) :
target = self.mem.read("target", None)
pos = self.mem.read("pos", None)
if target is not None and pos is not None:
diff = vec2_sub(target, pos)
resp.add_msg(Message(key = "angle", value = vec2_angle(diff)))
resp.add_msg(Message(key = "vel", value = vec2_min_max(vec2_scale(diff, 3), MIN_SPEED, MAX_SPEED)))
super(SimpleMoveModule, self).act(resp)
class SimpleMoveAgent(Agent) :
def __init__(self, name) :
super(SimpleMoveAgent, self).__init__(name)
self.mods = [SimpleMoveModule(), TargetModule()]
def focus(self) :
focus_info = super(SimpleMoveAgent, self).get_focus()
target = self.mem.read("target", None)
if target is not None :
focus_info["target"] = "(%4.2f, %4.2f)" % (target[0], target[1])
pos = self.mem.read("pos", None)
if pos is not None :
focus_info["pos"] = "(%4.2f, %4.2f)" % (pos[0], pos[1])
return focus_info
def run_sim(filename = None) :
'''
>>> run_sim()
'''
# create the oracle space
oracle = OracleSpace()
# create the context
context = Context(oracle = oracle)
# create the schedule for adding agents in the running
schedule = Schedule()
# add objects and agents to the context
obj = Object(name = "0")
obj.pos = (0, 0)
context.add_obj(obj)
schedule.add_agent(SimpleMoveAgent(name = "0"))
# create the driver
driver = Driver(context = context, schedule = schedule)
# create the inspector
# inspector = Inspector(delay = 10)
# create the simulator
sim = Simulator(driver = driver)
print("Simulating")
sim.simulate(graphics = True, filename = filename)
if __name__ == '__main__' :
filename = None
if (len(sys.argv) > 1) :
filename = sys.argv[1]
run_sim(filename = filename)
| apache-2.0 | 8,272,226,968,732,636,000 | 24.540541 | 111 | 0.573192 | false |
azotdata/azot-event-extractor | db_mongodb.py | 1 | 4124 | # -*- coding: utf-8 -*-
""""
Script which contains all class definition related to COUCHDB database
"""
from manage_db import *
from mongoengine import *
from utils import *
class Connection(ManageConnection):
"""
Manage connection to database
"""
def __init__(self,db_server):
self.db_server = db_server
ManageConnection.__init__(self)
def connect(self):
self.connection(self.db_server)
""" -------------------------------------------- """
""" ----Database connection must be done here--- """
if DB_SERVER=='couchdb':
from db_couchdb import *
elif DB_SERVER=='mongodb':
from db_mongodb import *
connecting = Connection(DB_SERVER)
connecting.connect()
""" -------------------------------------------- """
class Article(Document):
"""
    Represents articles stored in the DB. Used for both reading and inserting article data.
"""
meta = {
'strict': False,
'collection': 'articles'
}
num_cluster = IntField()
pub_date = StringField()
source = StringField()
text = StringField()
title = StringField()
@staticmethod
def check_article_url(url):
if not Article.objects(source=url):
return True
def _set_article(self, article):
self.source = article.url
self.title = article.title
self.text = article.text
if article.publish_date:
self.pub_date = str(article.publish_date[0].date())
else: # just in case publishing date cannot be retrieved, stores 'None'
self.pub_date = str(article.publish_date)
def save_article(self,article):
self._set_article(article)
self.save()
@staticmethod
def get_all_articles():
return Article.objects.all()
def update_article(self,cluster_key):
self.update(set__num_cluster=cluster_key)
class Stopword(Document):
"""
Class for storing stopwords objects
"""
meta = {
'collection': 'stopword'
}
lang = StringField(max_length=50)
word = StringField(max_length=50)
@staticmethod
def sw_exist():
if Stopword.objects:
return True
@staticmethod
def set_stopwords():
word_list = stopwords_list(SW_PATH)
sw_list = []
for lang, word in word_list.iteritems():
for each_word in word:
sw_list.append(Stopword(**{"lang": lang, "word": each_word}))
Stopword.objects.insert(sw_list)
@staticmethod
def get_all_stopwords():
return Stopword.objects.all()
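# Hypothetical usage sketch (not part of the original module). SW_PATH and
# stopwords_list() are assumed to come from utils, imported above:
#
#   if not Stopword.sw_exist():
#       Stopword.set_stopwords()
#   words = [sw.word for sw in Stopword.get_all_stopwords()]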
class ClusteringResume(Document):
"""
Stores main useful elements for each cluster
"""
meta = {
'collection': 'clustering_resume',
'strict': False
}
_id = IntField()
keywords = DictField()
cluster_title = StringField()
def set_dataclusters(self,cluster_id,keywords,title):
#cluster_list = []
#for i in range(cluster_number):
self._id = cluster_id
self.keywords = keywords
self.cluster_title = title
self.save()
@staticmethod
def remove_cluster_content():
return ClusteringResume.objects.delete()
class ClusteringReport(Document):
"""
Stores details after clustering.
"""
meta = {
'collection': 'clustering_report'
}
date = StringField()
count = IntField()
iterations = IntField()
nb_cluster = IntField()
cluster_list = DictField()
class ClusteringTagged(Document):
"""
    Stores important words of the same topic, generated after manual classification.
"""
meta = {
'collection': 'clustering_tagged'
}
nb_cluster_before = IntField()
nb_cluster_after = IntField()
tags = StringField()
clusters = DictField()
class TopicWord(Document):
"""
Class for storing important words, called topic, per cluster
"""
meta = {
'collection': 'topic_word'
}
word = StringField()
topic = StringField()
count = IntField()
stopword = BooleanField()
to_delete = BooleanField()
| gpl-3.0 | -8,880,503,633,293,053,000 | 23.843373 | 95 | 0.592871 | false |
drewcsillag/skunkweb | pylibs/pargen/initParGenGrammar.py | 1 | 1108 | #
# Copyright (C) 2001 Andrew T. Csillag <[email protected]>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
from RuleItems import Rule
import CompileGrammar
ruleSet=[
#Rule("Start", ['S','$'], 0, 'noop'),
Rule('S', ['RuleList'], 0, 'noop'),
Rule('RuleList', ['Rule', 'RuleList'], 1, 'noop'),
Rule('RuleList', [], 2, 'noop'),
Rule('Rule', ['COLON', 'id', 'COLON', 'TokItem', 'COLON', 'TokList'],
3,'RuleLine'),
Rule('TokList', ['Token', 'TokList'], 4, 'TTtoTokList'),
Rule('TokList', ['id', 'TokList'], 5, 'TTtoTokList'),
Rule('TokList', [], 6, 'NullToTokList'),
Rule('TokItem', ['Token',], 7, 'TokenToTokItem'),
Rule('TokItem', ['id',], 8, 'idToTokItem'),
]
print 'Rules'
print '--------------------'
for i in ruleSet:
print i
grammar = CompileGrammar.compileGrammar('LALR1', ruleSet, showStates = 1, showItems = 1)
gf = open ('ParGenGrammar.py', 'w')
for i in grammar.items():
gf.write('%s = %s\n' % i)
gf.close()
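# Hypothetical usage sketch (not part of the original script): once this
# module has been run, the generated table can be imported back. The name
# mirrors the file written above.
#
#   import ParGenGrammar
#   print ParGenGrammar.__dict__.keys()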
| gpl-2.0 | 5,383,967,760,084,567,000 | 31.588235 | 88 | 0.58213 | false |
stevecassidy/annotationrdf | annotationrdf/annotation_names.py | 1 | 2738 | from namespaces import *
NAMEMAP = {
'ambig-paragraph': ICEA.ambigparagraph,
'bold': ICEA.bold,
'byline': ICEA.byline,
'changed': ICEA.changed,
'corpus': ICEA.corpus,
'correction': ARTA.correction,
'deleted': ICEA.deleted,
'discontinuous': ICEA.discontinuous,
'dubious-nonsense': CA.dubious_nonsense,
'editorial': ICEA.editorial,
'ellipsis': COOEEA.ellipsis,
'elongation': CA.elongation,
'extended list': ACEA.extended_list,
'extratext': ICEA.extratext,
'footnote': ICEA.footnote,
'footnote-ref': ICEA.footnoteref,
'foreign': ICEA.foreign,
'format': ICEA['format'],
'heading': ICEA.heading,
'incomplete': ICEA.incomplete,
'inserted': ICEA.inserted,
'interjection': ARTA.interjection,
'intonation': CA.intonation,
'italic': ICEA.italic,
'latched-utterance': CA.latched_utterance,
'laughter': MONASHA.laughter,
'linebreak': ICEA.linebreak,
'list': ICEA.list,
'longdash': ICEA.longdash,
'longpause': ICEA.longpause,
'marginalia': ICEA.marginalia,
'medpause': ICEA.medpause,
'mention': ICEA.mention,
'micropause': CA.micropause,
'misc': ACEA.misc,
'normalised-discontinuous': ICEA.normalised_discontinuous,
'normative-deletion': ICEA.normative_deletion,
'normative-insertion': ICEA.normative_insertion,
'normative-replacement': ICEA.normative_replacement,
'note': ACEA.note,
'original-normalisation': ICEA.original_normalisation,
'overlap': MONASHA.overlap,
'overlapset': ICEA.overlapset,
'pageno': COOEEA.pageno,
'paragraph': ICEA.paragraph,
'pause': ICEA.pause,
'quote': ICEA.quote,
'redacted': MONASHA.redacted,
'roman': ICEA.roman,
'sent': ICEA.sent,
'signature': ICEA.signature,
'smallcaps': ICEA.smallcaps,
'space': ICEA.space,
'speaker': ICEA.speaker,
'special character': ACEA.special_character,
'speed': CA.speed,
'subheading': ICEA.subheading,
'subscript': ICEA.subscript,
'subtext': ICEA.subtext,
'superscript': ICEA.superscript,
'text': ICEA.text,
'typeface': ICEA.typeface,
'uncertain': ICEA.uncertain,
'underline': ICEA.underline,
'unknown': CA.unknown,
'untranscribed': ICEA.untranscribed,
'unuseable-char': ICEA.unuseable_char,
'volume': CA.volume,
'word': ICEA.word,
'MAU' : MAUS.phonetic,
'KAN' : MAUS.canonical,
'ORT' : MAUS.orthography,
}
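# Minimal lookup sketch (not part of the original module): resolving a raw
# annotation type name to its RDF property, with a namespaced fallback for
# names missing from NAMEMAP. The ICEA fallback is an assumption.
#
# def uri_for(name, default_ns=ICEA):
#     return NAMEMAP.get(name, default_ns[name.replace(' ', '_')])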
| bsd-3-clause | -5,983,904,074,951,426,000 | 34.102564 | 66 | 0.588386 | false |
isomer/faucet | faucet/vlan.py | 1 | 7169 | # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
from conf import Conf
from valve_util import btos
class VLAN(Conf):
tagged = None
untagged = None
vid = None
faucet_vips = None
bgp_as = None
bgp_local_address = None
bgp_port = None
bgp_routerid = None
bgp_neighbor_addresses = []
bgp_neighbour_addresses = []
bgp_neighbor_as = None
bgp_neighbour_as = None
routes = None
max_hosts = None
unicast_flood = None
acl_in = None
    # Define dynamic variables with prefix dyn_ to distinguish them from
    # variables set by configuration.
dyn_ipv4_routes = None
dyn_ipv6_routes = None
dyn_arp_cache = None
dyn_nd_cache = None
dyn_host_cache = None
defaults = {
'name': None,
'description': None,
'acl_in': None,
'faucet_vips': None,
'unicast_flood': True,
'bgp_as': 0,
'bgp_local_address': None,
'bgp_port': 9179,
'bgp_routerid': '',
'bgp_neighbour_addresses': [],
'bgp_neighbor_addresses': [],
'bgp_neighbour_as': 0,
'bgp_neighbor_as': None,
'routes': None,
'max_hosts': None,
}
def __init__(self, _id, dp_id, conf=None):
if conf is None:
conf = {}
self._id = _id
self.dp_id = dp_id
self.update(conf)
self.set_defaults()
self._id = _id
self.tagged = []
self.untagged = []
self.dyn_ipv4_routes = {}
self.dyn_ipv6_routes = {}
self.dyn_arp_cache = {}
self.dyn_nd_cache = {}
self.dyn_host_cache = {}
if self.faucet_vips:
self.faucet_vips = [
ipaddress.ip_interface(btos(ip)) for ip in self.faucet_vips]
if self.bgp_as:
assert self.bgp_port
assert ipaddress.IPv4Address(btos(self.bgp_routerid))
for neighbor_ip in self.bgp_neighbor_addresses:
assert ipaddress.ip_address(btos(neighbor_ip))
assert self.bgp_neighbor_as
if self.routes:
self.routes = [route['route'] for route in self.routes]
for route in self.routes:
ip_gw = ipaddress.ip_address(btos(route['ip_gw']))
ip_dst = ipaddress.ip_network(btos(route['ip_dst']))
assert ip_gw.version == ip_dst.version
if ip_gw.version == 4:
self.ipv4_routes[ip_dst] = ip_gw
else:
self.ipv6_routes[ip_dst] = ip_gw
@property
def ipv4_routes(self):
return self.dyn_ipv4_routes
@ipv4_routes.setter
def ipv4_routes(self, value):
self.dyn_ipv4_routes = value
@property
def ipv6_routes(self):
return self.dyn_ipv6_routes
@ipv6_routes.setter
def ipv6_routes(self, value):
self.dyn_ipv6_routes = value
@property
def arp_cache(self):
return self.dyn_arp_cache
@arp_cache.setter
def arp_cache(self, value):
self.dyn_arp_cache = value
@property
def nd_cache(self):
return self.dyn_nd_cache
@nd_cache.setter
def nd_cache(self, value):
self.dyn_nd_cache = value
@property
def host_cache(self):
return self.dyn_host_cache
@host_cache.setter
def host_cache(self, value):
self.dyn_host_cache = value
def set_defaults(self):
for key, value in list(self.defaults.items()):
self._set_default(key, value)
self._set_default('vid', self._id)
self._set_default('name', str(self._id))
self._set_default('faucet_vips', [])
self._set_default('bgp_neighbor_as', self.bgp_neighbour_as)
self._set_default(
'bgp_neighbor_addresses', self.bgp_neighbour_addresses)
def __str__(self):
port_list = [str(x) for x in self.get_ports()]
ports = ','.join(port_list)
return 'vid:%s ports:%s' % (self.vid, ports)
def get_ports(self):
return self.tagged + self.untagged
def mirrored_ports(self):
return [port for port in self.get_ports() if port.mirror]
def mirror_destination_ports(self):
return [port for port in self.get_ports() if port.mirror_destination]
def flood_ports(self, configured_ports, exclude_unicast):
ports = []
for port in configured_ports:
if not port.running:
continue
if exclude_unicast:
if not port.unicast_flood:
continue
ports.append(port)
return ports
def tagged_flood_ports(self, exclude_unicast):
return self.flood_ports(self.tagged, exclude_unicast)
def untagged_flood_ports(self, exclude_unicast):
return self.flood_ports(self.untagged, exclude_unicast)
def port_is_tagged(self, port_number):
for port in self.tagged:
if port.number == port_number:
return True
return False
def port_is_untagged(self, port_number):
for port in self.untagged:
if port.number == port_number:
return True
return False
def is_faucet_vip(self, ip):
for faucet_vip in self.faucet_vips:
if ip == faucet_vip.ip:
return True
return False
def ip_in_vip_subnet(self, ip):
for faucet_vip in self.faucet_vips:
if ip in faucet_vip.network:
return True
return False
def ips_in_vip_subnet(self, ips):
for ip in ips:
if not self.ip_in_vip_subnet(ip):
return False
return True
def from_connected_to_vip(self, src_ip, dst_ip):
"""Return True if src_ip in connected network and dst_ip is a VIP.
Args:
src_ip (ipaddress.ip_address): source IP.
dst_ip (ipaddress.ip_address): destination IP
Returns:
True if local traffic for a VIP.
"""
if self.is_faucet_vip(dst_ip) and self.ip_in_vip_subnet(src_ip):
return True
return False
def to_conf(self):
return self._to_conf()
def __hash__(self):
items = [(k, v) for k, v in list(self.__dict__.items()) if 'dyn' not in k]
return hash(frozenset(list(map(str, items))))
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
| apache-2.0 | -7,152,596,814,654,182,000 | 28.746888 | 82 | 0.580834 | false |
cltrudeau/django-awl | extras/sample_site/app/migrations/0001_initial.py | 1 | 1338 | # Generated by Django 3.0.5 on 2020-06-12 14:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Writer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Show',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('writer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Writer')),
],
),
migrations.CreateModel(
name='Episode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('show', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Show')),
],
),
]
| mit | 4,384,991,229,205,189,600 | 34.210526 | 131 | 0.562033 | false |
jjmalina/pyinfluxql | tests/test_query.py | 1 | 16698 | # -*- coding: utf-8 -*-
"""
test_query
~~~~~~~~~~
Tests the query generator
"""
import six
import pytest
from datetime import datetime, timedelta
import dateutil
from pyinfluxql.functions import Sum, Min, Max, Count, Distinct, Percentile
from pyinfluxql.query import Query, ContinuousQuery
@pytest.mark.unit
def test_clone():
"""Cloning a query instance should return a new query instance with the
same data but different references
"""
query = Query(Count(Distinct('col'))).from_('measurement')
query._limit = 100
query._group_by_time = timedelta(hours=24)
query._group_by.append('col2')
new_query = query.clone()
assert new_query._measurement == query._measurement
assert len(new_query._select_expressions) == len(query._select_expressions)
assert new_query._select_expressions != query._select_expressions
assert new_query._limit == query._limit
assert new_query._group_by_time == query._group_by_time
assert new_query._group_by == query._group_by
new_query._select_expressions.append(Count('blah'))
new_query._limit = 10
new_query._group_by_time = timedelta(days=7)
new_query._group_by.append('col3')
assert len(new_query._select_expressions) != len(query._select_expressions)
assert len(new_query._select_expressions) == 2
assert len(query._select_expressions) == 1
assert new_query._limit != query._limit
assert new_query._limit == 10
assert query._limit == 100
assert new_query._group_by_time != query._group_by_time
assert new_query._group_by_time == timedelta(days=7)
assert query._group_by_time == timedelta(hours=24)
assert new_query._group_by != query._group_by
assert new_query._group_by == ['col2', 'col3']
assert query._group_by == ['col2']
@pytest.mark.unit
def test_select():
"""Selecting should be chainable and add to the `_select_expressions`
list.
"""
q = Query('colname')
query = q.from_('test_measurement')
assert isinstance(query, Query)
assert len(query._select_expressions) == 1
query.select('colname2').select('colname3')
assert isinstance(query, Query)
assert len(query._select_expressions) == 3
query.select('colname4', 'colname5')
assert len(query._select_expressions) == 5
@pytest.mark.unit
def test_format_select():
q = Query().from_('test_measurement')
q._select_expressions = ['hello']
assert q._format_select() == 'SELECT hello'
q._select_expressions = ['hello', 'goodbye']
assert q._format_select() == 'SELECT hello, goodbye'
q = Query().from_('test_measurement')
q._select_expressions = [Sum('hello')]
assert q._format_select() == 'SELECT SUM(hello)'
q._select_expressions = [Sum('hello'), Min('bye')]
assert q._format_select() == 'SELECT SUM(hello), MIN(bye)'
q = Query().from_('1').select(Max(Min('hello')))
assert q._format_select() == 'SELECT MAX(MIN(hello))'
@pytest.mark.unit
def test_format_select_expressions():
"""_format_select_expressions should take multiple arguments and
format functions correctly
"""
q = Query()
assert q._format_select_expressions('1 + 1') == '1 + 1'
assert q._format_select_expressions('1 + 1', 'BLAH') == '1 + 1, BLAH'
assert q._format_select_expressions('1 + 1', 'BLAH', '2') == \
'1 + 1, BLAH, 2'
assert q._format_select_expressions(*[Distinct('a'), 'BLAH', '2']) == \
'DISTINCT(a), BLAH, 2'
@pytest.mark.unit
def test_format_select_rexpression():
"""_format_select_expression should take one argument and if a function
format it correctly
"""
q = Query()
assert q._format_select_expression('a') == 'a'
assert q._format_select_expression(Sum('a')) == 'SUM(a)'
assert q._format_select_expression(Sum(Max('a'))) == 'SUM(MAX(a))'
@pytest.mark.unit
def test_format_measurement():
q = Query().from_('test_measurement')
assert q._format_measurement('test_measurement') == 'test_measurement'
assert q._format_measurement('test series') == '"test series"'
assert q._format_measurement('test-series') == '"test-series"'
assert q._format_measurement('/test series*/') == '/test series*/'
assert q._format_measurement('/test-series*/') == '/test-series*/'
@pytest.mark.unit
def test_format_from():
"""_format_from should format correctly
"""
assert Query().from_('test_measurement')._format_from() == 'FROM test_measurement'
assert Query().from_('test series')._format_from() == 'FROM "test series"'
assert Query().from_('a_series')._format_from() == 'FROM a_series'
assert Query().from_('a series')._format_from() == 'FROM "a series"'
@pytest.mark.unit
def test_where():
"""where should insert into the _where dict and be chainable
"""
q = Query('test_measurement').where(a=1, b=3, c__gt=3)
assert q._where['a'] == 1
assert q._where['b'] == 3
assert q._where['c__gt'] == 3
assert isinstance(q, Query)
@pytest.mark.unit
def test_format_value():
"""_format_value should format strings, ints, floats, bools and
datetimes correctly
"""
q = Query('test_measurement')
assert q._format_value('hello') == "'hello'"
assert q._format_value(1) == "1"
assert q._format_value(1.0) == "1.0"
assert q._format_value(True) == "true"
assert q._format_value(False) == "false"
assert q._format_value('/stats.*/') == "/stats.*/"
assert q._format_value(datetime(2014, 2, 10, 18, 4, 53, 834825)) == \
"'2014-02-10 18:04:53.834'"
assert q._format_value(
datetime(2014, 2, 10, 18, 4, 53, 834825)
.replace(tzinfo=dateutil.tz.gettz('US/Eastern'))) == \
"'2014-02-10 23:04:53.834'"
@pytest.mark.unit
def test_date_range():
q = Query()
start = datetime.utcnow() - timedelta(hours=1)
end = datetime.utcnow() - timedelta(minutes=1)
q.date_range(start)
assert q._where['time__gt'] == start
q = Query()
q.date_range(start, end)
assert q._where['time__gt'] == start
assert q._where['time__lt'] == end
q = Query()
q.date_range(start=start, end=end)
assert q._where['time__gt'] == start
assert q._where['time__lt'] == end
q = Query()
q.date_range(start=10, end=100)
assert q._where['time__gt'] == 10
assert q._where['time__lt'] == 100
with pytest.raises(ValueError):
Query().date_range(end, start)
with pytest.raises(ValueError):
Query().date_range()
@pytest.mark.unit
def test_format_where():
"""_format_where should format an entire where clause correctly
"""
q = Query().where(foo=4)
assert q._format_where() == 'WHERE foo = 4'
q = Query().where(foo__bar=4)
assert q._format_where() == 'WHERE foo.bar = 4'
q = Query().where(foo__bar__lt=4)
assert q._format_where() == 'WHERE foo.bar < 4'
q = Query().where(foo__bar__baz__lt=4)
assert q._format_where() == 'WHERE foo.bar.baz < 4'
query = Query().where(
col1='a',
col2__ne='b',
col3__lt=5,
col4__gt=7.0)
assert query._format_where() == \
"WHERE col1 = 'a' AND col2 != 'b' AND col3 < 5 AND col4 > 7.0"
@pytest.mark.unit
def test_format_where_eq():
"""equals expressions should be formatted correctly in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'eq', 'hi') == "col = 'hi'"
@pytest.mark.unit
def test_format_where_ne():
"""not equals expressions should be formatted correctly
in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'ne', False) == "col != false"
assert q._format_where_expression(['col'], 'ne', True) == "col != true"
@pytest.mark.unit
def test_format_where_lt():
"""less than expressions should be formatted correctly
in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'lt', 1.0) == "col < 1.0"
assert q._format_where_expression(['col'], 'lt', 50) == "col < 50"
@pytest.mark.unit
def test_format_where_gt():
"""greater than expressions should be formatted correctly
in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'gt', 1.0) == "col > 1.0"
assert q._format_where_expression(['col'], 'gt', 50) == "col > 50"
@pytest.mark.unit
def test_group_by():
"""group_by should correctly set the query's group by arguments and
be chainable
"""
td = timedelta(hours=1)
q = Query().group_by('col1', 'col2', time=td)
assert isinstance(q, Query)
assert q._group_by_time == td
assert q._group_by == ['col1', 'col2']
q = Query().group_by(time=td, fill=True)
assert q._group_by_time == td
assert q._group_by_fill
q = Query().group_by(time=td, fill=False)
assert not q._group_by_fill
@pytest.mark.unit
def test_group_by_time():
td = timedelta(hours=1)
q = Query().group_by_time(td)
assert q._group_by_time == td
td = timedelta(hours=2)
q.group_by_time(td)
assert q._group_by_time == td
q.group_by_time('1h')
assert q._group_by_time == '1h'
@pytest.mark.unit
def test_format_group_by():
"""_format_group_by should correctly format one or more
group by statements
"""
q = Query().group_by('col1')
assert q._format_group_by() == 'GROUP BY col1'
q.group_by('col2')
assert q._format_group_by() == 'GROUP BY col1, col2'
q.group_by(time=timedelta(days=1))
assert q._format_group_by() == 'GROUP BY time(1d), col1, col2'
q = Query().group_by(time=timedelta(hours=5))
assert q._format_group_by() == 'GROUP BY time(5h)'
q = Query().group_by(time=timedelta(hours=5), fill=True)
assert q._format_group_by() == 'GROUP BY time(5h) fill(0)'
q = Query().group_by(time=timedelta(hours=5), fill=False)
assert q._format_group_by() == 'GROUP BY time(5h)'
q = Query().group_by(time='1h', fill=False)
assert q._format_group_by() == 'GROUP BY time(1h)'
q = Query().group_by_time('1h', fill=True)
assert q._format_group_by() == 'GROUP BY time(1h) fill(0)'
@pytest.mark.unit
def test_limit():
"""limit should set the query's limit argument and be chainable
"""
q = Query().limit(1000)
assert isinstance(q, Query)
assert q._limit == 1000
@pytest.mark.unit
def test_format_limit():
"""_format_lmit should correctly format the limit clause
"""
q = Query().limit(1000)
assert q._format_limit() == 'LIMIT 1000'
@pytest.mark.unit
def test_order():
q = Query().order('time', 'asc')
assert q._order == 'ASC'
q = Query().order('time', 'ASC')
assert q._order == 'ASC'
q = Query().order('time', 'desc')
assert q._order == 'DESC'
q = Query().order('time', 'DESC')
assert q._order == 'DESC'
with pytest.raises(TypeError):
Query().order('-time')
@pytest.mark.unit
def test_format_order():
"""_format_order should correctly format the order clause
"""
q = Query().order('time', 'asc')
assert q._format_order() == 'ORDER BY time ASC'
q.order('time', 'desc')
assert q._format_order() == 'ORDER BY time DESC'
q = Query().order('time', 'ASC')
assert q._format_order() == 'ORDER BY time ASC'
q.order('time', 'DESC')
assert q._format_order() == 'ORDER BY time DESC'
@pytest.mark.unit
def test_into():
q = Query().into('another_series')
assert q._into_series == 'another_series'
@pytest.mark.unit
def test_format_into():
q = Query().into('another_series')
assert q._format_into() == 'INTO another_series'
q = Query()
assert q._format_into() == ''
@pytest.mark.unit
def test_format_query():
q = Query().from_('x')
expected = "SELECT * FROM x;"
assert q._format_query("SELECT * FROM x ") == expected
expected = 'DELETE FROM x;'
assert q._format_query('DELETE FROM x ') == expected
@pytest.mark.unit
def test_format_select_query():
"""_format should correctly format the entire query
"""
# Test simple selects
assert Query('*').from_('x')._format_select_query() == \
"SELECT * FROM x;"
assert Query('a', 'b').from_('x')._format_select_query() == \
"SELECT a, b FROM x;"
# Test limit
assert Query('*').from_('x').limit(100) \
._format_select_query() == "SELECT * FROM x LIMIT 100;"
# Test order
assert Query('*').from_('x').order('time', 'asc') \
._format_select_query() == "SELECT * FROM x ORDER BY time ASC;"
assert Query('*').from_('x').order('time', 'desc') \
._format_select_query() == "SELECT * FROM x ORDER BY time DESC;"
# Test functions
assert Query(Count('a')).from_('x') \
._format_select_query() == "SELECT COUNT(a) FROM x;"
assert Query(Sum(Count('a'))).from_('x') \
._format_select_query() == "SELECT SUM(COUNT(a)) FROM x;"
# Test where, comparators and value formatting
assert Query('*').from_('x').where(a='something') \
._format_select_query() == "SELECT * FROM x WHERE a = 'something';"
assert Query('*').from_('x').where(a='something', b=1) \
._format_select_query() == \
"SELECT * FROM x WHERE a = 'something' AND b = 1;"
assert Query('*').from_('x').where(a__ne='something') \
._format_select_query() == "SELECT * FROM x WHERE a != 'something';"
assert Query('*').from_('x').where(a=True, b=False) \
._format_select_query() == \
"SELECT * FROM x WHERE a = true AND b = false;"
assert Query('*').from_('x').where(a=True, b=False) \
._format_select_query() == \
"SELECT * FROM x WHERE a = true AND b = false;"
assert Query('*').from_('x').where(a__lt=4, b__gt=6.0) \
._format_select_query() == "SELECT * FROM x WHERE a < 4 AND b > 6.0;"
# Test group by
assert Query('*').from_('x').group_by('a') \
._format_select_query() == "SELECT * FROM x GROUP BY a;"
assert Query('*').from_('x').group_by('a', 'b') \
._format_select_query() == "SELECT * FROM x GROUP BY a, b;"
q = Query('*').from_('x') \
.group_by(time=timedelta(hours=1))
assert q._format_select_query() == "SELECT * FROM x GROUP BY time(1h);"
q = Query('*').from_('x') \
.group_by('a', 'b', time=timedelta(hours=1))
assert q._format_select_query() == "SELECT * FROM x GROUP BY time(1h), a, b;"
# Test something really crazy
fmt = "SELECT COUNT(a), SUM(b), PERCENTILE(d, 99) FROM x "
fmt += "WHERE e = false AND f != true AND g < 4 AND h > 5 "
fmt += "GROUP BY time(1h), a, b fill(0) "
fmt += "LIMIT 100 ORDER BY time ASC;"
q = Query(Count('a'), Sum('b'), Percentile('d', 99)) \
.from_('x') \
.where(e=False, f__ne=True, g__lt=4, h__gt=5) \
.group_by('a', 'b', time=timedelta(minutes=60), fill=True) \
.limit(100).order('time', 'asc')
assert q._format_select_query() == fmt
@pytest.mark.unit
def test_format_delete_query():
q = Query().from_('series')
q._is_delete = True
assert q._format_delete_query() == 'DELETE FROM series;'
q.date_range(start=20, end=40)
expected = 'DELETE FROM series WHERE time > 20 AND time < 40;'
assert q._format_delete_query() == expected
q = Query().from_('series')
q.date_range(end=40)
expected = 'DELETE FROM series WHERE time < 40;'
assert q._format_delete_query() == expected
@pytest.mark.unit
def test_format():
q = Query('blah').from_('series')
q._is_delete = True
assert q._format() == 'DELETE FROM series;'
q.date_range(start=20, end=40)
q._is_delete = False
expected = 'SELECT blah FROM series WHERE time > 20 AND time < 40;'
assert q._format() == expected
@pytest.mark.unit
def test_str():
q = Query('blah').from_('series')
q._is_delete = True
assert str(q) == 'DELETE FROM series;'
q.date_range(start=20, end=40)
q._is_delete = False
expected = 'SELECT blah FROM series WHERE time > 20 AND time < 40;'
assert str(q) == expected
@pytest.mark.unit
def test_unicode():
q = Query('blah').from_('series')
q._is_delete = True
assert six.u(str(q)) == u'DELETE FROM series;'
q.date_range(start=20, end=40)
q._is_delete = False
expected = u'SELECT blah FROM series WHERE time > 20 AND time < 40;'
assert six.u(str(q)) == expected
@pytest.mark.unit
def test_format_continuous_query():
q = Query(Count('col')).from_('clicks') \
.group_by(time=timedelta(hours=1)).into('clicks.count.1h')
cq = ContinuousQuery("1h_clicks_count", "test", q)
expected = 'CREATE CONTINUOUS QUERY "1h_clicks_count" ON test BEGIN SELECT COUNT(col) FROM clicks GROUP BY time(1h) INTO clicks.count.1h END'
assert cq._format() == expected
| mit | 6,504,522,673,743,951,000 | 31.486381 | 145 | 0.604324 | false |
EricssonResearch/calvin-base | calvin/runtime/north/authorization/policy_retrieval_point.py | 1 | 4113 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
import errno
import os
import glob
import json
from calvin.utilities import calvinuuid
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
# This is an abstract class for the PRP (Policy Retrieval Point)
class PolicyRetrievalPoint(object):
__metaclass__ = ABCMeta # Metaclass for defining Abstract Base Classes
@abstractmethod
def get_policy(self, id):
"""Return a JSON representation of the policy identified by id"""
return
@abstractmethod
def get_policies(self, filter):
"""Return a JSON representation of all policies found by using filter"""
return
@abstractmethod
def create_policy(self, data):
"""Create policy based on the JSON representation in data"""
return
@abstractmethod
def update_policy(self, data, id):
"""Change the content of the policy identified by id to data (JSON representation of policy)"""
return
@abstractmethod
def delete_policy(self, id):
"""Delete the policy identified by id"""
return
class FilePolicyRetrievalPoint(PolicyRetrievalPoint):
def __init__(self, path):
# Replace ~ by the user's home directory.
self.path = os.path.expanduser(path)
if not os.path.exists(self.path):
try:
os.makedirs(self.path)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def get_policy(self, policy_id):
"""Return the policy identified by policy_id"""
try:
with open(os.path.join(self.path, policy_id + ".json"), 'rt') as data:
return json.load(data)
except Exception as err:
_log.error("Failed to open policy file for policy_id={}".format(policy_id))
raise
def get_policies(self, name_pattern='*'):
"""Return all policies found using the name_pattern"""
policies = {}
for filename in glob.glob(os.path.join(self.path, name_pattern + ".json")):
try:
with open(filename, 'rb') as data:
policy_id = os.path.splitext(os.path.basename(filename))[0]
policies[policy_id] = json.load(data)
except ValueError as err:
_log.error("Failed to parse policy as json, file={}".format(filename))
raise
except (OSError, IOError) as err:
_log.error("Failed to open file={}".format(filename))
raise
return policies
def create_policy(self, data):
"""Create policy based on the JSON representation in data"""
policy_id = calvinuuid.uuid("POLICY")
with open(os.path.join(self.path, policy_id + ".json"), "w") as file:
json.dump(data, file)
return policy_id
def update_policy(self, data, policy_id):
"""Change the content of the policy identified by policy_id to data (JSON representation of policy)"""
file_path = os.path.join(self.path, policy_id + ".json")
if os.path.isfile(file_path):
with open(file_path, "w") as file:
json.dump(data, file)
else:
raise IOError # Raise exception if policy named filename doesn't exist
def delete_policy(self, policy_id):
"""Delete the policy named policy_id"""
os.remove(os.path.join(self.path, policy_id + ".json"))
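
# Minimal usage sketch (not part of the original module). The directory,
# the policy content and the printed output are illustrative assumptions;
# the id returned by create_policy() is a generated uuid, so it has to be
# captured in order to address the policy afterwards.
if __name__ == '__main__':
    prp = FilePolicyRetrievalPoint("~/.calvin/security/policies")
    policy_id = prp.create_policy({"id": "example", "rule_combining": "permit_overrides"})
    print(prp.get_policy(policy_id))            # fetch the stored policy back
    prp.update_policy({"id": "example", "rule_combining": "deny_overrides"}, policy_id)
    print(list(prp.get_policies('*').keys()))   # ids of all stored policies
    prp.delete_policy(policy_id)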
| apache-2.0 | -4,371,406,419,679,389,000 | 36.054054 | 110 | 0.627279 | false |
shawncaojob/LC | PY/679_24_game.py | 1 | 2347 | # 679. 24 Game
# You have 4 cards each containing a number from 1 to 9. You need to judge whether they can be operated on through *, /, +, -, (, ) to get the value of 24.
#
# Example 1:
# Input: [4, 1, 8, 7]
# Output: True
# Explanation: (8-4) * (7-1) = 24
# Example 2:
# Input: [1, 2, 1, 2]
# Output: False
# Note:
# The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
# Every operation done is between two numbers. In particular, we cannot use - as a unary operator. For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
# You cannot concatenate numbers together. For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
# 2018.02.04
# Basic DFS
# Special case: when 2 elements are left, we need to check the result from the other two elements first by saving the existing result.
class Solution(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return self.dfs(nums, None, None)
# "save" is only needed when handling similar case of (a +/- b) * or / (c +/- d).
# However, this solution does not check those operators. Further optimization could be made to reduce the redundant work.
def dfs(self, nums, cur, save):
if not nums and save is None and self.is24(cur): return True
if not nums and save and self.dfs([save], cur, None): return True
for i in xrange(len(nums)):
num, next_nums = nums[i], nums[:i] + nums[i+1:]
if cur is None: # BUG, if not cur, just set the cur.
if self.dfs(next_nums, num, save): return True
else:
next_cur = [cur + num, cur - num, cur * num, float(cur) / num, num - cur]
if cur != 0: next_cur += [float(num) / cur]
for x in next_cur:
if self.dfs(next_nums, x, save): return True
if len(nums) == 3 and self.dfs(next_nums, None, x): return True # Case (1 + 9) * (1 + 2)
return False
def is24(self, x):
return True if x == 24 or abs(24 - x) < 0.00001 else False
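
# Quick illustrative check (not part of the original solution), using the
# examples from the problem statement above:
if __name__ == "__main__":
    sol = Solution()
    print(sol.judgePoint24([4, 1, 8, 7]))  # True: (8-4) * (7-1) = 24
    print(sol.judgePoint24([1, 2, 1, 2]))  # False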
| gpl-3.0 | 7,296,170,151,600,796,000 | 41.672727 | 185 | 0.572646 | false |
zoranzhao/NoSSim | NoS_Vgraph/core_util_plot.py | 1 | 5568 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost  # axes_grid is deprecated in modern matplotlib
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
def plot(srv_app, srv_lwip, cli_app, cli_lwip):
#srv_app = {0:[],1:[],2:[]}
#srv_lwip = {0:[],1:[],2:[]}
#cli_app = {0:[],1:[],2:[]}
#cli_lwip = {0:[],1:[],2:[]}
O2lwip=cli_lwip[2]
O2comp=cli_app[2]
O1lwip=cli_lwip[1]
O1comp=cli_app[1]
O0lwip=cli_lwip[0]
O0comp=cli_app[0]
colorsred = ['brown', 'red', 'tomato', 'lightsalmon']
colorsgreen = ['darkgreen', 'seagreen', 'limegreen', 'springgreen']
colorsblue =['navy', 'blue', 'steelblue', 'lightsteelblue']
hatches = ['//', '++', 'xxx', 'oo','\\\\\\', 'OO', '..' , '---', "**"]
label_size=15
font_size=15
#client
N = 3
width = 0.25 # the width of the bars
xtra_space = 0.02
ind = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind1 = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind2 = np.arange(N) + 2+(N+1) - (width*3+xtra_space*2)/2 # the x locations for the groups
ind3 = np.arange(N) + 2+N+1+N+1 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind = np.append(ind1, ind2)
ind = np.append(ind, ind3)
#ind = np.append(ind, ind4)
#ind = np.append(ind, ind5)
fig, ax = plt.subplots(2)
a1 = ax[0].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[0].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[0].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[0].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[0].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[0].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
OLevel = ["O-0", "O-1", "O-2", "O-3"]
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - lwIP", " - App."]
legend_size=16
plt.figlegend(
(
a1, a2,
b1, b2,
c1, c2
),
(
OLevel[2]+duration_type[1], OLevel[2]+duration_type[0],
OLevel[1]+duration_type[1], OLevel[1]+duration_type[0],
OLevel[0]+duration_type[1], OLevel[0]+duration_type[0]
),
scatterpoints=1,
loc='upper center',
ncol=3,
prop={'size':legend_size})
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[0].set_xticks( xticks )
ax[0].set_xticks( xticks_minor, minor=True )
ax[0].set_xticklabels( xlbls )
ax[0].set_xlim( 1, 13 )
ax[0].grid( 'off', axis='x' )
ax[0].grid( 'off', axis='x', which='minor' )
# vertical alignment of xtick labels
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[0].get_xticklabels( ), va ):
t.set_y( y )
ax[0].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
#ax.tick_params( axis='x', which='major', direction='out', length=10 )
ax[0].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[0].get_yticks()
ax[0].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#server
O2lwip=srv_lwip[2]
O2comp=srv_app[2]
O1lwip=srv_lwip[1]
O1comp=srv_app[1]
O0lwip=srv_lwip[0]
O0comp=srv_app[0]
a1 = ax[1].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[1].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[1].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[1].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[1].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[1].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - Communication", " - Computation"]
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[1].set_xticks( xticks )
ax[1].set_xticks( xticks_minor, minor=True )
ax[1].set_xticklabels( xlbls )
ax[1].set_xlim( 1, 13 )
ax[1].grid( 'off', axis='x' )
ax[1].grid( 'off', axis='x', which='minor' )
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[1].get_xticklabels( ), va ):
t.set_y( y )
ax[1].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
ax[1].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[1].get_yticks()
ax[1].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
# add some text for labels, title and axes ticks
ax[0].set_ylabel('Core Utilization', fontsize=label_size)
ax[0].set_xlabel('Client', fontsize=label_size)
ax[1].set_ylabel('Core Utilization', fontsize=label_size)
ax[1].set_xlabel('Server', fontsize=label_size)
ax[0].tick_params(axis='y', labelsize=font_size)
ax[1].tick_params(axis='y', labelsize=font_size)
ax[0].tick_params(axis='x', labelsize=font_size)
ax[1].tick_params(axis='x', labelsize=font_size)
plt.show()
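
# Illustrative call (not part of the original module). Each dict maps an
# optimization level (0/1/2) to 9 utilization fractions -- 3 client counts
# x 3 channels -- matching the 9 bar positions built above; values are made up.
if __name__ == '__main__':
    import random
    def fake_series():
        return {lvl: [random.uniform(0.05, 0.4) for _ in range(9)] for lvl in (0, 1, 2)}
    plot(fake_series(), fake_series(), fake_series(), fake_series())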
| bsd-3-clause | -7,547,278,008,441,325,000 | 30.106145 | 126 | 0.604885 | false |
ROB-Seismology/oq-hazardlib | openquake/hazardlib/tests/gsim/lin_2009_test.py | 1 | 1805 | # The Hazard Library
# Copyright (C) 2013 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.lin_2009 import Lin2009, Lin2009AdjustedSigma
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class Lin2009TestCase(BaseGSIMTestCase):
GSIM_CLASS = Lin2009
# Test data were obtained from a Fortran implementation
# provided by the authors.
def test_mean(self):
self.check('LIN2009/Lin2009_MEAN.csv',
max_discrep_percentage=0.5)
def test_std_total(self):
self.check('LIN2009/Lin2009_TOTAL_STDDEV.csv',
max_discrep_percentage=0.5)
class Lin2009AdjustedSigmaTestCase(BaseGSIMTestCase):
GSIM_CLASS = Lin2009AdjustedSigma
# Test data were obtained from a Fortran implementation
# provided by the authors with sigma coefficients modified from
# Table 7 of Cheng et al. (2013):
# C. -T. Cheng, P. -S. Hsieh, P. -S. Lin, Y. -T. Yen, C. -H. Chan (2013)
# Probability Seismic Hazard Mapping of Taiwan
def test_std_total(self):
self.check('LIN2009/Lin2009AdjustedSigma_TOTAL_STDDEV.csv',
max_discrep_percentage=0.5)
| agpl-3.0 | -2,888,368,218,168,569,300 | 37.404255 | 76 | 0.718006 | false |
PetroDE/control | control/service/buildable.py | 1 | 5256 | """
Specialized Service for services that can't be containers. Images-only club.
"""
import logging
from os.path import abspath, dirname, isfile, join
from control.cli_builder import builder
from control.repository import Repository
from control.options import options
from control.service.service import ImageService
class Buildable(ImageService):
"""
Okay I lied. There are 3 kinds of services. The problem is that there are
base images that need to be built, but don't know enough to be long running
containers. Control doesn't control containers that you use like
executables. Control handles the starting of long running service daemons
in development and testing environments.
"""
service_options = {
'dockerfile',
'events',
'fromline',
} | ImageService.service_options
all_options = service_options
def __init__(self, service, controlfile):
super().__init__(service, controlfile)
self.logger = logging.getLogger('control.service.Buildable')
self.dockerfile = {'dev': '', 'prod': ''}
self.fromline = {'dev': '', 'prod': ''}
try:
self.events = service.pop('events')
except KeyError:
self.events = {}
self.logger.debug('No events defined')
try:
dkrfile = service.pop('dockerfile')
if isinstance(dkrfile, dict):
self.dockerfile = {
'dev': abspath(join(dirname(self.controlfile),
dkrfile['dev'])),
'prod': abspath(join(dirname(self.controlfile),
dkrfile['prod'])),
}
elif dkrfile == "":
self.dockerfile = {'dev': "", 'prod': ""}
else:
self.dockerfile = {
'dev': abspath(join(dirname(self.controlfile), dkrfile)),
'prod': abspath(join(dirname(self.controlfile), dkrfile)),
}
self.logger.debug('setting dockerfile %s', self.dockerfile)
except KeyError as e:
# Guess that there's a Dockerfile next to the Controlfile
dkrfile = join(abspath(dirname(self.controlfile)), 'Dockerfile')
devfile = dkrfile + '.dev'
prdfile = dkrfile + '.prod'
try:
self.dockerfile['dev'], self.dockerfile['prod'] = {
# devProdAreEmpty, DockerfileExists, DevProdExists
(True, True, False): lambda f, d, p: (f, f),
(True, False, True): lambda f, d, p: (d, p),
(True, False, False): lambda f, d, p: ('', ''),
# This list is sparsely populated because these are the
# only conditions that mean the values need to be guessed
}[(
not self.dockerfile['dev'] and not self.dockerfile['prod'],
isfile(dkrfile),
isfile(devfile) and isfile(prdfile)
)](dkrfile, devfile, prdfile)
self.logger.debug('setting dockerfile: %s', self.dockerfile)
except KeyError as e:
self.logger.warning(
'%s: problem setting dockerfile: %s missing',
self.service,
e)
if 'fromline' in service:
fline = service.pop('fromline')
if isinstance(fline, dict):
self.fromline = {
'dev': fline.get('dev', ''),
'prod': fline.get('prod', '')
}
else:
self.fromline = {
'dev': fline,
'prod': fline
}
# The rest of the options can be straight assigned
# for key, val in (
# (key, val)
# for key, val in service.items()
# if key in self.service_options):
# self.logger.debug('buildable assigning key %s value %s', key, val)
# self.__dict__[key] = val
if not self.service:
self.logger.debug('setting service name from guess')
self.service = Repository.match(self.image).image
self.logger.debug('Found Buildable %s', self.service)
def dump_build(self, prod=False, pretty=True):
"""dump out a CLI version of how this image would be built"""
rep = builder('build', pretty=pretty) \
.tag(self.image) \
.path(dirname(self.controlfile)) \
.file(self.dockerfile['prod'] if prod else self.dockerfile['dev']) \
.pull(options.pull) \
.rm(options.no_rm) \
.force_rm(options.force) \
.no_cache(not options.cache)
return rep
def buildable(self):
"""Check if the service is buildable"""
return self.dockerfile['dev'] or self.dockerfile['prod']
def dev_buildable(self):
"""Check if the service is buildable in a dev environment"""
        return self.dockerfile['dev']
def prod_buildable(self):
"""Check if the service is buildable in a prod environment"""
return self.dockerfile['prod']
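
# Illustrative only (assumed behaviour, not confirmed by this file):
# Buildable.dump_build() returns a cli_builder object whose string form is
# the equivalent `docker build` command line, roughly:
#   str(svc.dump_build(prod=True))
#   -> docker build --tag <image> --file <Dockerfile.prod> <controlfile dir>
# where `svc` is a hypothetical Buildable instance.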
| mit | -7,254,068,709,873,107,000 | 38.223881 | 80 | 0.53672 | false |
OpenGov/og-python-utils | tests/checks_test.py | 1 | 1777 | # This import fixes sys.path issues
from .parentpath import *
import unittest
from .fake_iterable import FakeIterable
from ogutils import checks
class ChecksTest(unittest.TestCase):
def test_booleanize_false(self):
self.assertEquals(checks.booleanize(False), False)
self.assertEquals(checks.booleanize(0), False)
self.assertEquals(checks.booleanize(''), False)
self.assertEquals(checks.booleanize([]), False)
self.assertEquals(checks.booleanize(set()), False)
self.assertEquals(checks.booleanize({}), False)
self.assertEquals(checks.booleanize(FakeIterable([])), False)
self.assertEquals(checks.booleanize('false'), False)
self.assertEquals(checks.booleanize('FaLsE'), False)
self.assertEquals(checks.booleanize('FALSE'), False)
self.assertEquals(checks.booleanize('0'), False)
self.assertEquals(checks.booleanize(e for e in []), False)
def test_booleanize_true(self):
self.assertEquals(checks.booleanize(True), True)
self.assertEquals(checks.booleanize(1), True)
self.assertEquals(checks.booleanize('a'), True)
self.assertEquals(checks.booleanize([0]), True)
self.assertEquals(checks.booleanize(set([0])), True)
self.assertEquals(checks.booleanize({'a': 0}), True)
self.assertEquals(checks.booleanize(FakeIterable([1])), True)
self.assertEquals(checks.booleanize('true'), True)
self.assertEquals(checks.booleanize('TrUe'), True)
self.assertEquals(checks.booleanize('TRUE'), True)
self.assertEquals(checks.booleanize('1'), True)
self.assertEquals(checks.booleanize(e for e in [0]), True)
if __name__ == "__main__":
unittest.main()
| mit | 5,541,737,921,280,505,000 | 44.763158 | 69 | 0.669668 | false |
blockstack/blockstack-server | integration_tests/blockstack_integration_tests/scenarios/name_preorder.py | 1 | 2869 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
import testlib
import virtualchain
import blockstack
import blockstack.blockstackd as blockstackd
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 )
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
def scenario( wallets, **kw ):
testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
testlib.next_block( **kw )
resp = testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr )
testlib.next_block( **kw )
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
return False
if ns['namespace_id'] != 'test':
return False
# preordered
preorder = state_engine.get_name_preorder( "foo.test", virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr )
if preorder is None:
return False
# paid fee
if preorder['op_fee'] < blockstack.lib.client.get_name_cost('foo.test', hostport='http://localhost:16264'):
print "{} < {}".format(preorder['op_fee'], blockstack.lib.client.get_name_cost('foo.test', hostport='http://localhost:16264'))
print "Insufficient fee"
return False
return True
| gpl-3.0 | -3,451,106,283,767,778,000 | 35.782051 | 144 | 0.700941 | false |
ValdisVitolins/espeak-ng | src/ucd-tools/tools/categories.py | 8 | 8186 | #!/usr/bin/python
# Copyright (C) 2012-2018 Reece H. Dunn
#
# This file is part of ucd-tools.
#
# ucd-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ucd-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ucd-tools. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import ucd
ucd_rootdir = sys.argv[1]
ucd_version = sys.argv[2]
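# Illustrative invocation (paths and version are assumptions):
#   python categories.py /path/to/ucd/data 11.0.0 [--with-csur]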
unicode_chars = {}
for data in ucd.parse_ucd_data(ucd_rootdir, 'UnicodeData'):
for codepoint in data['CodePoint']:
unicode_chars[codepoint] = data['GeneralCategory']
if '--with-csur' in sys.argv:
for csur in ['Klingon']:
for data in ucd.parse_ucd_data('data/csur', csur):
for codepoint in data['CodePoint']:
unicode_chars[codepoint] = data['GeneralCategory']
# This map is a combination of the information in the UnicodeData and Blocks
# data files. It is intended to reduce the number of character tables that
# need to be generated.
category_sets = [
(ucd.CodeRange('000000..00D7FF'), None, 'Multiple Blocks'),
(ucd.CodeRange('00D800..00DFFF'), 'Cs', 'Surrogates'),
(ucd.CodeRange('00E000..00F7FF'), 'Co', 'Private Use Area'),
(ucd.CodeRange('00F800..02FAFF'), None, 'Multiple Blocks'),
(ucd.CodeRange('02FB00..0DFFFF'), 'Cn', 'Unassigned'),
(ucd.CodeRange('0E0000..0E01FF'), None, 'Multiple Blocks'),
(ucd.CodeRange('0E0200..0EFFFF'), 'Cn', 'Unassigned'),
(ucd.CodeRange('0F0000..0FFFFD'), 'Co', 'Plane 15 Private Use'),
(ucd.CodeRange('0FFFFE..0FFFFF'), 'Cn', 'Plane 15 Private Use'),
(ucd.CodeRange('100000..10FFFD'), 'Co', 'Plane 16 Private Use'),
(ucd.CodeRange('10FFFE..10FFFF'), 'Cn', 'Plane 16 Private Use'),
]
# These categories have many pages consisting of just this category:
# Cn -- Unassigned
# Lo -- CJK Ideographs
special_categories = ['Cn', 'Co', 'Lo', 'Sm', 'So']
category_tables = {}
for codepoints, category, comment in category_sets:
if not category:
table = {}
table_entry = None
table_codepoint = None
table_category = None
for i, codepoint in enumerate(codepoints):
try:
category = unicode_chars[codepoint]
except KeyError:
category = 'Cn' # Unassigned
if (i % 256) == 0:
if table_entry:
if table_category in special_categories:
table[table_codepoint] = table_category
elif table_category:
raise Exception('%s only table not in the special_categories list.' % table_category)
else:
table[table_codepoint] = table_entry
table_entry = []
table_codepoint = codepoint
table_category = category
if category != table_category:
table_category = None
table_entry.append(category)
if table_entry:
if table_category in special_categories:
table[table_codepoint] = table_category
else:
table[table_codepoint] = table_entry
category_tables['%s_%s' % (codepoints.first, codepoints.last)] = table
if __name__ == '__main__':
sys.stdout.write("""/* Unicode General Categories
*
* Copyright (C) 2012-2018 Reece H. Dunn
*
* This file is part of ucd-tools.
*
* ucd-tools is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ucd-tools is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ucd-tools. If not, see <http://www.gnu.org/licenses/>.
*/
/* NOTE: This file is automatically generated from the UnicodeData.txt file in
* the Unicode Character database by the ucd-tools/tools/categories.py script.
*/
#include "ucd/ucd.h"
#include <stddef.h>
#define Cc UCD_CATEGORY_Cc
#define Cf UCD_CATEGORY_Cf
#define Cn UCD_CATEGORY_Cn
#define Co UCD_CATEGORY_Co
#define Cs UCD_CATEGORY_Cs
#define Ll UCD_CATEGORY_Ll
#define Lm UCD_CATEGORY_Lm
#define Lo UCD_CATEGORY_Lo
#define Lt UCD_CATEGORY_Lt
#define Lu UCD_CATEGORY_Lu
#define Mc UCD_CATEGORY_Mc
#define Me UCD_CATEGORY_Me
#define Mn UCD_CATEGORY_Mn
#define Nd UCD_CATEGORY_Nd
#define Nl UCD_CATEGORY_Nl
#define No UCD_CATEGORY_No
#define Pc UCD_CATEGORY_Pc
#define Pd UCD_CATEGORY_Pd
#define Pe UCD_CATEGORY_Pe
#define Pf UCD_CATEGORY_Pf
#define Pi UCD_CATEGORY_Pi
#define Po UCD_CATEGORY_Po
#define Ps UCD_CATEGORY_Ps
#define Sc UCD_CATEGORY_Sc
#define Sk UCD_CATEGORY_Sk
#define Sm UCD_CATEGORY_Sm
#define So UCD_CATEGORY_So
#define Zl UCD_CATEGORY_Zl
#define Zp UCD_CATEGORY_Zp
#define Zs UCD_CATEGORY_Zs
#define Ii UCD_CATEGORY_Ii
/* Unicode Character Data %s */
""" % ucd_version)
for category in special_categories:
sys.stdout.write('\n')
sys.stdout.write('static const uint8_t categories_%s[256] =\n' % category)
sys.stdout.write('{')
for i in range(0, 256):
if (i % 16) == 0:
sys.stdout.write('\n\t/* %02X */' % i)
sys.stdout.write(' %s,' % category)
sys.stdout.write('\n};\n')
for codepoints, category, comment in category_sets:
if not category:
tables = category_tables['%s_%s' % (codepoints.first, codepoints.last)]
for codepoint in sorted(tables.keys()):
table = tables[codepoint]
if table in special_categories:
continue
sys.stdout.write('\n')
sys.stdout.write('static const uint8_t categories_%s[256] =\n' % codepoint)
sys.stdout.write('{')
for i, category in enumerate(table):
if (i % 16) == 0:
sys.stdout.write('\n\t/* %02X */' % i)
sys.stdout.write(' %s,' % category)
sys.stdout.write('\n};\n')
for codepoints, category, comment in category_sets:
if not category:
table_index = '%s_%s' % (codepoints.first, codepoints.last)
sys.stdout.write('\n')
sys.stdout.write('static const uint8_t *categories_%s[] =\n' % table_index)
sys.stdout.write('{\n')
for codepoint, table in sorted(category_tables[table_index].items()):
if isinstance(table, str):
sys.stdout.write('\tcategories_%s, /* %s */\n' % (table, codepoint))
else:
sys.stdout.write('\tcategories_%s,\n' % codepoint)
sys.stdout.write('};\n')
sys.stdout.write('\n')
sys.stdout.write('ucd_category ucd_lookup_category(codepoint_t c)\n')
sys.stdout.write('{\n')
for codepoints, category, comment in category_sets:
if category:
sys.stdout.write('\tif (c <= 0x%s) return %s; /* %s : %s */\n' % (codepoints.last, category, codepoints, comment))
else:
sys.stdout.write('\tif (c <= 0x%s) /* %s */\n' % (codepoints.last, codepoints))
sys.stdout.write('\t{\n')
sys.stdout.write('\t\tconst uint8_t *table = categories_%s_%s[(c - 0x%s) / 256];\n' % (codepoints.first, codepoints.last, codepoints.first))
sys.stdout.write('\t\treturn (ucd_category)table[c % 256];\n')
sys.stdout.write('\t}\n')
sys.stdout.write('\treturn Ii; /* Invalid Unicode Codepoint */\n')
sys.stdout.write('}\n')
sys.stdout.write("""
ucd_category_group ucd_get_category_group_for_category(ucd_category c)
{
switch (c)
{
case Cc: case Cf: case Cn: case Co: case Cs:
return UCD_CATEGORY_GROUP_C;
case Ll: case Lm: case Lo: case Lt: case Lu:
return UCD_CATEGORY_GROUP_L;
case Mc: case Me: case Mn:
return UCD_CATEGORY_GROUP_M;
case Nd: case Nl: case No:
return UCD_CATEGORY_GROUP_N;
case Pc: case Pd: case Pe: case Pf: case Pi: case Po: case Ps:
return UCD_CATEGORY_GROUP_P;
case Sc: case Sk: case Sm: case So:
return UCD_CATEGORY_GROUP_S;
case Zl: case Zp: case Zs:
return UCD_CATEGORY_GROUP_Z;
case Ii:
default:
return UCD_CATEGORY_GROUP_I;
}
}
ucd_category_group ucd_lookup_category_group(codepoint_t c)
{
return (ucd_category_group)ucd_get_category_group_for_category(ucd_lookup_category(c));
}
""")
| gpl-3.0 | -2,705,840,602,450,405,000 | 33.108333 | 143 | 0.691669 | false |
twisted/mantissa | xmantissa/test/historic/test_port1to2.py | 1 | 1157 | """
Upgrader tests for L{xmantissa.port} items.
"""
from xmantissa.port import TCPPort, SSLPort
from xmantissa.web import SiteConfiguration
from axiom.test.historic.stubloader import StubbedTest
from xmantissa.test.historic.stub_port1to2 import TCP_PORT, SSL_PORT
class PortInterfaceUpgradeTest(StubbedTest):
"""
Schema upgrade tests for L{xmantissa.port} items.
This upgrade adds an "interface" attribute.
"""
def test_TCPPort(self):
"""
Test the TCPPort 1->2 schema upgrade.
"""
port = self.store.findUnique(TCPPort)
self.assertEqual(port.portNumber, TCP_PORT)
self.assertTrue(isinstance(port.factory, SiteConfiguration))
self.assertEqual(port.interface, u'')
def test_SSLPort(self):
"""
Test the SSLPort 1->2 schema upgrade.
"""
port = self.store.findUnique(SSLPort)
self.assertEqual(port.portNumber, SSL_PORT)
self.assertEqual(port.certificatePath,
self.store.newFilePath('certificate'))
self.assertTrue(isinstance(port.factory, SiteConfiguration))
self.assertEqual(port.interface, u'')
| mit | 4,553,412,484,645,735,000 | 31.138889 | 68 | 0.677615 | false |
rgllm/uminho | 04/CN/TP3/src/src/parser/PsoTools.py | 1 | 4783 | import itertools
import json
import matplotlib.pyplot as plt
from matplotlib import style
import os
style.use('ggplot')
import numpy as np
from pprint import pprint
from os.path import basename
xrange=range
class PsoTools(object):
def __init__(self):
pass
# Convert a data raw file to a json file
def rawToJson(self, inputFilePath, outputFilePath):
inFile = open(inputFilePath, mode='r')
outFile = open(outputFilePath, mode='w')
meta_data = dict.fromkeys(['nb_customers', 'nb_depots',
'vehicle_cap', 'vehicle_cost', 'cost_type'])
cust_dict = dict.fromkeys(['x', 'y', 'demand'])
dep_dict = dict.fromkeys(['x', 'y', 'capacity'])
customers = {}
depots = {}
# Number of customers and available depots
nb_customers = int(inFile.readline())
nb_depots = int(inFile.readline())
meta_data['nb_customers'] = nb_customers
meta_data['nb_depots'] = nb_depots
inFile.readline() # Empty line
		# Depot coordinates
for i, line in enumerate(inFile):
if i < nb_depots:
x = float(line.split()[0])
y = float(line.split()[1])
depots['d'+str(i)] = {}
depots['d'+str(i)]['x'] = x
depots['d'+str(i)]['y'] = y
else:
i=i-1
break
		# Customer coordinates
for i, line in enumerate(inFile):
if i < nb_customers:
x = float(line.split()[0])
y = float(line.split()[1])
customers['c'+str(i)] = {}
customers['c'+str(i)]['x'] = x
customers['c'+str(i)]['y'] = y
else:
break
		# Vehicle and depot capacities
for i, line in enumerate(inFile):
if i == 0:
vehicle_cap = float(line)
meta_data['vehicle_cap'] = vehicle_cap
elif i == 1:
pass
elif i < nb_depots+2:
depot_cap = float(line)
depots['d'+str(i-2)]['capacity'] = depot_cap
else:
break
		# Customer demands
for i, line in enumerate(inFile):
if i < nb_customers:
demand = float(line)
customers['c'+str(i)]['demand'] = demand
else:
break
		# Depot opening costs, vehicle cost and cost type
for i, line in enumerate(inFile):
if i < nb_depots:
openning_cost = float(line)
depots['d'+str(i)]['opening_cost'] = openning_cost
elif i == nb_depots:
pass
elif i == nb_depots+1:
vehicle_cost = float(line)
meta_data['vehicle_cost'] = vehicle_cost
elif i == nb_depots+2:
pass
elif i == nb_depots+3:
cost_type = float(line)
meta_data['cost_type'] = cost_type
else:
break
final_output = {}
final_output['customers'] = customers
final_output['depots'] = depots
final_output['meta_data'] = meta_data
json.dump(final_output, outFile, indent=4)
inFile.close()
outFile.close()
# Plot the customers on the map
def plotCustomers(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
coords_cust = np.zeros(shape=(nb_customers,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot the depots on the map
def plotDepots(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_depots = data['meta_data']['nb_depots']
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot both depots and customers on the map
def plotAll(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
nb_depots = data['meta_data']['nb_depots']
coords_cust = np.zeros(shape=(nb_customers,2))
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
filename = str(basename(os.path.splitext(jsonInputFile)[0]) + '.pdf')
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='s', s=10, linewidth=5)
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='8', s=10, linewidth=5)
plt.savefig(filename, format='pdf')
#~ plt.show()
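
# Illustrative usage sketch (not part of the original class); the file names
# and the raw-instance layout are assumptions based on rawToJson() above.
if __name__ == '__main__':
	tools = PsoTools()
	tools.rawToJson('instances/coord20-5-1.dat', 'instances/coord20-5-1.json')
	tools.plotAll('instances/coord20-5-1.json')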
| mit | 7,930,167,950,086,020,000 | 25.870787 | 83 | 0.612795 | false |
dtroyer/python-openstacksdk | openstack/tests/unit/identity/v3/test_credential.py | 1 | 1958 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.identity.v3 import credential
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'blob': '1',
'id': IDENTIFIER,
'project_id': '3',
'type': '4',
'user_id': '5',
}
class TestCredential(base.TestCase):
def test_basic(self):
sot = credential.Credential()
self.assertEqual('credential', sot.resource_key)
self.assertEqual('credentials', sot.resources_key)
self.assertEqual('/credentials', sot.base_path)
self.assertEqual('identity', sot.service.service_type)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_get)
self.assertTrue(sot.allow_update)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
self.assertEqual('PATCH', sot.update_method)
self.assertDictEqual(
{
'type': 'type',
'user_id': 'user_id',
'limit': 'limit',
'marker': 'marker',
},
sot._query_mapping._mapping)
def test_make_it(self):
sot = credential.Credential(**EXAMPLE)
self.assertEqual(EXAMPLE['blob'], sot.blob)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
self.assertEqual(EXAMPLE['type'], sot.type)
self.assertEqual(EXAMPLE['user_id'], sot.user_id)
| apache-2.0 | 1,760,831,423,039,303,400 | 33.350877 | 75 | 0.643003 | false |
OpenTechFund/WebApp | opentech/public/funds/migrations/0004_fundpagerelatedpage.py | 1 | 1183 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-12 15:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('public_funds', '0003_icon_and_related_pages'),
]
operations = [
migrations.CreateModel(
name='FundPageRelatedPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('source_page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_pages', to='public_funds.FundPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| gpl-2.0 | 5,493,090,841,830,758,000 | 37.16129 | 168 | 0.607777 | false |
vvvityaaa/tornado-challenger | challenge_manager.py | 1 | 4420 | import os.path
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.httpserver
import mongoengine
from models import Challenge, ChallengePoint
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/', IndexHandler),
(r'/challenges/$', ChallengeListHandler),
(r'/challenges/(\w+)$', ChallengeDetailHandler),
(r'/edit/(\w+)$', EditHandler),
(r'/add', EditHandler),
(r'/add_point/(\w+)$', EditPointHandler),
            (r'/edit_point/(\w+)x(\d+)$', EditPointHandler),  # contains url and key parameters
]
settings = dict(
template_path = os.path.join(os.path.dirname(__file__),'templates'),
static_path = os.path.join(os.path.dirname(__file__),'static'),
debug = True
)
        mongoengine.connect('challenger')  # connect to the DB named 'challenger' via the mongoengine driver
tornado.web.Application.__init__(self,handlers,**settings)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render('base.html')
class ChallengeListHandler(tornado.web.RequestHandler):
def get(self):
self.render('challenge_list.html', challenges=Challenge.objects)
class ChallengeDetailHandler(tornado.web.RequestHandler):
def get(self, url):
self.render('challenge_detail.html', challenge=Challenge.objects.get(url=url))
class EditHandler(tornado.web.RequestHandler):
'''
    Handles both adding and editing of the Challenge model.
    A new Challenge instance can be created via the form, but at least one ChallengePoint must be created to submit it.
'''
def get(self, url=None):
'''
        If the url keyword is captured from the request path, it is passed in as an argument and the existing challenge is only edited; otherwise a blank add form is rendered.
'''
if url:
self.render('edit_challenge.html', challenge=Challenge.objects.get(url=url))
else:
self.render('add.html')
def post(self, url=None):
'''
        If url is given, the existing model is only edited.
'''
challenge = dict()
challenge_fields = ['header', 'url', 'date_start', 'date_end']
if url:
            challenge = Challenge.objects.get(url=url)  # fetch the existing challenge object so its fields can be edited
for field in challenge_fields:
challenge[field] = self.get_argument(field, None)
if url:
challenge.save()
else:
point = dict()
point_fields=['title', 'key', 'done', 'required_time']
for field in point_fields:
point[field] = self.get_argument(field, None)
            challenge['points'] = [ChallengePoint(**point)]  # at least one point entry is required to submit the form
            Challenge(**challenge).save()  # build a new Challenge instance from the collected arguments and save it
self.redirect('/challenges/')
class EditPointHandler(tornado.web.RequestHandler):
'''
    Implements editing and adding of challenge points.
    If key is fetched from the url, the point is only edited.
'''
def get(self, url, key = None):
if key:
self.render('edit_challenge_point.html',
challenge_point = Challenge.objects.get(url=url).points.get(key=key))
else:
self.render('add_challenge_point.html')
def post(self, url, key = None):
challenge_point = dict()
challenge_point_fields = ['title','key','done',
'required_time']
if key:
challenge = Challenge.objects.get(url=url)
challenge_point = challenge.points.get(key=key)
for field in challenge_point_fields:
challenge_point[field] = self.get_argument(field, None)
if key:
            challenge.save()  # embedded points are persisted by saving the parent document
else:
            challenge = Challenge.objects.get(url=url)
            challenge.points.create(**challenge_point)  # create() builds and appends the embedded point
            challenge.save()  # embedded documents have no save() of their own; save the parent
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| gpl-2.0 | -2,984,514,721,142,183,400 | 33.80315 | 128 | 0.610181 | false |
reebalazs/gf.rejuice | setup.py | 1 | 1731 |
__version__ = '0.3'
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'docs/README.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
setup(
name = 'gf.rejuice',
version = __version__,
description = '`gf.rejuice` provides additional tools for developers to use `Juicer` '
'for the compression of Javascript and CSS resources, '
'in the context of python web applications that run via WSGI.',
long_description = README + '\n\n' + CHANGES,
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
keywords = 'web middleware wsgi css js juicer merging minifying development',
author = "Balazs Ree",
author_email = "[email protected]",
url = "https://launchpad.net/gf.rejuice",
license = "GPL",
packages = find_packages(),
include_package_data = True,
namespace_packages = ['gf'],
zip_safe = False,
install_requires=[
'setuptools',
'lxml >= 2.1.1',
'WebOb',
],
test_suite = "gf.rejuice",
tests_require=[
'BeautifulSoup',
'setuptools',
'lxml >= 2.1.1',
'WebOb',
],
entry_points = """\
[paste.filter_app_factory]
develjuice = gf.rejuice.develjuice:make_middleware
[console_scripts]
rejuice = gf.rejuice.rejuice_script:main
"""
)
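# Example (hypothetical) PasteDeploy configuration wiring up the
# 'develjuice' filter declared in entry_points above; the section and
# app names are illustrative:
#
#   [filter:develjuice]
#   use = egg:gf.rejuice#develjuice
#
#   [pipeline:main]
#   pipeline = develjuice myapp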
| gpl-2.0 | -4,068,187,978,773,046,300 | 29.910714 | 90 | 0.574235 | false |
munin/munin | munin/mod/launch.py | 1 | 2955 | """
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
import re
import datetime
from munin import loadable
class launch(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 1)
self.paramre = re.compile(r"^\s*(\S+|\d+)\s+(\d+)")
self.usage = self.__class__.__name__ + " <class|eta> <land_tick>"
self.helptext = [
"Calculate launch tick, launch time, prelaunch tick and prelaunch modifier for a given ship class or eta, and land tick."
]
self.class_eta = {"fi": 8, "co": 8, "fr": 9, "de": 9, "cr": 10, "bs": 10}
def execute(self, user, access, irc_msg):
if access < self.level:
irc_msg.reply("You do not have enough access to use this command")
return 0
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
eta = m.group(1)
land_tick = int(m.group(2))
if eta.lower() in list(self.class_eta.keys()):
eta = self.class_eta[eta.lower()]
else:
try:
eta = int(eta)
except ValueError:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
current_tick = self.current_tick(irc_msg.round)
current_time = datetime.datetime.utcnow()
launch_tick = land_tick - eta
launch_time = current_time + datetime.timedelta(
hours=(launch_tick - current_tick)
)
prelaunch_tick = land_tick - eta + 1
prelaunch_mod = launch_tick - current_tick
irc_msg.reply(
"eta %d landing pt %d (currently %d) must launch at pt %d (%s), or with prelaunch tick %d (currently %+d)"
% (
eta,
land_tick,
current_tick,
launch_tick,
(launch_time.strftime("%m-%d %H:55")),
prelaunch_tick,
prelaunch_mod,
)
)
return 1
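# Worked example for the arithmetic above (illustrative): a battleship
# (eta 10) landing at tick 500 while the current tick is 480 gives
# launch_tick = 500 - 10 = 490, prelaunch_tick = 491, and a prelaunch
# modifier of 490 - 480 = +10.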
| gpl-2.0 | 4,536,700,690,433,954,300 | 31.833333 | 133 | 0.59357 | false |
gutorc92/curitools | setup.py | 1 | 1514 | from setuptools import setup, find_packages
from os import path, listdir
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst')) as f:
long_description = f.read()
setup(
name='CUriTools',
version='0.7.0',
packages=find_packages(exclude=["curitools.tests"]),
url='https://github.com/gutorc92/curitools',
author='Gustavo Coelho',
author_email='[email protected]',
license='Creative Commons Attribution-Noncommercial-Share Alike license',
long_description=long_description,
keywords='uri',
install_requires=['requests','click','clint','bs4'],
package_data={
'curitools': ["templates/*"],
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
entry_points={
'console_scripts': [
'curi=curitools.curitools:uri',
],
},
)
| mit | 4,636,919,008,187,735,000 | 29.897959 | 77 | 0.606341 | false |
shanot/imp | modules/em/test/test_rasterization.py | 2 | 2031 | import IMP
import IMP.test
import IMP.algebra
import IMP.em
from io import BytesIO
class Tests(IMP.test.TestCase):
def test_rasterization(self):
"""Test creation DensityMap from grid"""
t = IMP.algebra.Vector3D(1., 2., 3.)
r = IMP.algebra.get_identity_rotation_3d()
tran = IMP.algebra.Transformation3D(r, t)
rf = IMP.algebra.ReferenceFrame3D(tran)
var = IMP.algebra.Vector3D(4., 5., 6.)
g = IMP.algebra.Gaussian3D(rf, var)
sio = BytesIO()
g.show(sio)
def check_gauss(g):
g_rf = g.get_reference_frame()
self.assertLess(IMP.algebra.get_distance(
g_rf.get_transformation_to().get_translation(), t), 1e-4)
self.assertLess(IMP.algebra.get_distance(g.get_variances(), var),
1e-4)
self.assertLess(IMP.algebra.get_distance(g.get_center(), t), 1e-4)
check_gauss(g)
covar = IMP.algebra.get_covariance(g)
g2 = IMP.algebra.get_gaussian_from_covariance(covar, t)
check_gauss(g2)
bb = IMP.algebra.BoundingBox3D(t, t)
grid_slow = IMP.algebra.get_rasterized([g], [1.0], 1.0, bb)
grid_fast = IMP.algebra.get_rasterized_fast([g], [1.0], 1.0, bb)
# Make sure that the returned grids are usable in Python
self.assertLess((grid_fast.get_origin() - t).get_magnitude(), 1e-4)
self.assertLess((grid_slow.get_origin() - t).get_magnitude(), 1e-4)
d_slow=IMP.em.create_density_map(grid_slow)
d_fast=IMP.em.create_density_map(grid_fast)
# Check returned density maps
d_origin = IMP.algebra.Vector3D(1.5, 2.5, 3.5)
self.assertLess((d_fast.get_origin() - d_origin).get_magnitude(), 1e-4)
self.assertLess((d_slow.get_origin() - d_origin).get_magnitude(), 1e-4)
self.assertAlmostEqual(d_fast.get_spacing(), 1.0, delta=1e-4)
self.assertAlmostEqual(d_slow.get_spacing(), 1.0, delta=1e-4)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | 7,407,767,106,597,813,000 | 42.212766 | 79 | 0.602166 | false |
todor943/mapEngine | MapApi/views.py | 1 | 3383 | import json
import pprint
import time
import datetime
import django.core.serializers
from django.contrib.auth import *
from django.contrib.auth.decorators import *
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.serializers import geojson
from django.core import *
from django.http import *
from django.shortcuts import *
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import *
from rest_framework import parsers, renderers
from rest_framework.authtoken.models import Token
# from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework_gis.serializers import GeoFeatureModelSerializer,GeoFeatureModelListSerializer
from rest_framework_gis.serializers import ListSerializer, ModelSerializer
from rest_framework.serializers import Serializer, FloatField, CharField
from rest_framework.serializers import DecimalField, IntegerField, DateTimeField
from rest_framework.response import Response
from rest_framework.views import APIView
import MapApp
class FakeApiView(View):
def get(self, request):
data = django.core.serializers.serialize(
"geojson", MapApp.models.MapEntity.objects.all()
)
return HttpResponse(data)
def post(self, request, *args, **kwargs):
data = {}
if request.user.is_authenticated():
requestData = json.loads(request.POST['jsonData'])
now = time.time()
if 'position' not in requestData:
return JsonResponse({})
request.session['location'] = requestData['position']
request.session['mapOptions'] = requestData['mapOptions']
request.session['lastUpdate'] = time.time()
radius = requestData['radius']
searchPnt = self.locationToPoint(requestData['position']);
# now = datetime.datetime.now()
# earlier = now - datetime.timedelta(hours=1)
time_filter = datetime.datetime.now() - datetime.timedelta(hours = 1)
data = MapApp.models.MapEntity.objects.filter(
geoLocationField__distance_lte=(searchPnt, radius),
publishDate__gte=time_filter
)
data = django.core.serializers.serialize("geojson", data)
print ("Updated the user's map state in session")
# print request.user.get_username()
return HttpResponse(data)
    def locationToPoint(self, position):
        # WKT order is 'POINT(lon lat)', so lng deliberately comes before lat
        return GEOSGeometry('POINT(' + str(position['lng']) + ' ' + str(position['lat']) + ')', srid=4326)
def handleRequest(self, request):
pass
def getEventsInRadius(self, centerPnt, distance):
pass
def updateSession(self, request):
pass
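# Note on the distance filter used in post() above (illustrative): GeoDjango's
# `__distance_lte` lookup compares against a (geometry, distance) pair, and
# passing the raw numeric radius assumes the field's SRID units. One could be
# explicit with a Distance object instead:
#
#   from django.contrib.gis.measure import D
#   MapApp.models.MapEntity.objects.filter(
#       geoLocationField__distance_lte=(searchPnt, D(m=radius)))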
class ObtainAuthToken(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
# Token.objects
# token, created = Token.objects.create(user=user)
doDelete = True
try:
currentToken = Token.objects.get(user=user)
# TODO
except Exception:
doDelete = False
if doDelete:
print("Renewing user token")
currentToken.delete()
else :
print("Attempting to create new user token")
token = Token.objects.create(user=user)
return Response({'token': token.key})
| apache-2.0 | -7,913,719,095,525,884,000 | 30.616822 | 101 | 0.754064 | false |
CoRfr/testman4trac | testman4trac/0.11/testmanager/model.py | 1 | 34790 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011 Roberto Longobardi
#
# This file is part of the Test Manager plugin for Trac.
#
# The Test Manager plugin for Trac is free software: you can
# redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later
# version.
#
# The Test Manager plugin for Trac is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Test Manager plugin for Trac. See the file LICENSE.txt.
# If not, see <http://www.gnu.org/licenses/>.
#
import copy
import re
import time
from datetime import date, datetime
from trac.core import *
from trac.db import Table, Column, Index
from trac.env import IEnvironmentSetupParticipant
from trac.perm import PermissionError
from trac.resource import Resource, ResourceNotFound
from trac.util.datefmt import utc, utcmax
from trac.util.text import CRLF
from trac.wiki.api import WikiSystem
from trac.wiki.model import WikiPage
from tracgenericclass.model import IConcreteClassProvider, AbstractVariableFieldsObject, AbstractWikiPageWrapper, need_db_create_for_realm, create_db_for_realm, need_db_upgrade_for_realm, upgrade_db_for_realm
from tracgenericclass.util import *
from testmanager.util import *
try:
from testmanager.api import _, tag_, N_
except ImportError:
from trac.util.translation import _, N_
tag_ = _
class AbstractTestDescription(AbstractWikiPageWrapper):
"""
A test description object based on a Wiki page.
Concrete subclasses are TestCatalog and TestCase.
Uses a textual 'id' as key.
Comprises a title and a description, currently embedded in the wiki
page respectively as the first line and the rest of the text.
The title is automatically wiki-formatted as a second-level title
(i.e. sorrounded by '==').
"""
# Fields that must not be modified directly by the user
protected_fields = ('id', 'page_name')
def __init__(self, env, realm='testdescription', id=None, page_name=None, title=None, description=None, db=None):
self.env = env
self.values = {}
self.values['id'] = id
self.values['page_name'] = page_name
self.title = title
self.description = description
self.env.log.debug('Title: %s' % self.title)
self.env.log.debug('Description: %s' % self.description)
key = self.build_key_object()
AbstractWikiPageWrapper.__init__(self, env, realm, key, db)
def post_fetch_object(self, db):
# Fetch the wiki page
AbstractWikiPageWrapper.post_fetch_object(self, db)
# Then parse it and derive title, description and author
self.title = get_page_title(self.wikipage.text)
self.description = get_page_description(self.wikipage.text)
self.author = self.wikipage.author
self.env.log.debug('Title: %s' % self.title)
#self.env.log.debug('Description: %s' % self.description)
def pre_insert(self, db):
""" Assuming the following fields have been given a value before this call:
title, description, author, remote_addr
"""
self.text = '== '+self.title+' ==' + CRLF + CRLF + self.description
AbstractWikiPageWrapper.pre_insert(self, db)
return True
def pre_save_changes(self, db):
""" Assuming the following fields have been given a value before this call:
title, description, author, remote_addr
"""
self.text = '== '+self.title+' ==' + CRLF + CRLF + self.description
AbstractWikiPageWrapper.pre_save_changes(self, db)
return True
class TestCatalog(AbstractTestDescription):
"""
A container for test cases and sub-catalogs.
Test catalogs are organized in a tree. Since wiki pages are instead
on a flat plane, we use a naming convention to flatten the tree into
page names. These are examples of wiki page names for a tree:
TC --> root of the tree. This page is automatically
created at plugin installation time.
TC_TT0 --> test catalog at the first level. Note that 0 is
the catalog ID, generated at creation time.
TC_TT0_TT34 --> sample sub-catalog, with ID '34', of the catalog
with ID '0'
TC_TT27 --> sample other test catalog at first level, with
ID '27'
There is not limit to the depth of a test tree.
Test cases are contained in test catalogs, and are always
leaves of the tree:
TC_TT0_TT34_TC65 --> sample test case, with ID '65', contained
in sub-catalog '34'.
Note that test case IDs are independent on
test catalog IDs.
"""
def __init__(self, env, id=None, page_name=None, title=None, description=None, db=None):
AbstractTestDescription.__init__(self, env, 'testcatalog', id, page_name, title, description, db)
def get_enclosing_catalog(self):
"""
Returns the catalog containing this test catalog, or None if its a root catalog.
"""
page_name = self.values['page_name']
cat_page = page_name.rpartition('_TT')[0]
if cat_page == 'TC':
return None
else:
            cat_id = cat_page.rpartition('TT')[2]  # parent catalog id, taken from its own page name
return TestCatalog(self.env, cat_id, cat_page)
def list_subcatalogs(self, db=None):
"""
Returns a list of the sub catalogs of this catalog.
"""
tc_search = TestCatalog(self.env)
tc_search['page_name'] = self.values['page_name'] + '_TT%'
cat_re = re.compile('^TT[0-9]*$')
for tc in tc_search.list_matching_objects(exact_match=False, db=db):
# Only return direct sub-catalogs and exclude test cases
if cat_re.match(tc['page_name'].partition(self.values['page_name']+'_')[2]) :
yield tc
def list_testcases(self, plan_id=None, deep=False, db=None):
"""
Returns a list of the test cases in this catalog.
If plan_id is provided, returns a list of TestCaseInPlan objects,
otherwise, of TestCase objects.
:deep: if True indicates to return all TCs in the catalog and
recursively in all the contained sub-catalogs.
"""
self.env.log.debug('>>> list_testcases')
if plan_id is not None:
from testmanager.api import TestManagerSystem
default_status = TestManagerSystem(self.env).get_default_tc_status()
tc_search = TestCase(self.env)
tc_search['page_name'] = self.values['page_name'] + ('_TC%', '%_TC%')[deep]
for tc in tc_search.list_matching_objects(exact_match=False, db=db):
self.env.log.debug(' ---> Found testcase %s' % tc['id'])
if plan_id is None:
yield tc
else:
tcip = TestCaseInPlan(self.env, tc['id'], plan_id)
if not tcip.exists:
tcip['status'] = default_status
yield tcip
self.env.log.debug('<<< list_testcases')
def list_testplans(self, db=None):
"""
Returns a list of test plans for this catalog.
"""
self.env.log.debug('>>> list_testplans')
tp_search = TestPlan(self.env)
tp_search['catid'] = self.values['id']
tp_search['contains_all'] = None
tp_search['freeze_tc_versions'] = None
for tp in tp_search.list_matching_objects(db=db):
yield tp
self.env.log.debug('<<< list_testplans')
def pre_delete(self, db):
"""
Delete all contained test catalogs and test cases, recursively.
"""
AbstractTestDescription.pre_delete(self, db)
self.env.log.debug("Deleting all test cases related to this catalog id '%s'" % self['id'])
for tc in self.list_testcases(db=db):
tc.delete(db=db)
self.env.log.debug("Deleting all sub catalogs in this catalog id '%s'" % self['id'])
for tcat in self.list_subcatalogs(db=db):
tcat.delete(db=db)
return True
def post_delete(self, db):
"""
Deletes the test plans associated to this catalog and the status
of the test cases in those plans and their status change
history.
"""
self.env.log.debug("Deleting all test plans related to this catalog id '%s'" % self['id'])
for tp in self.list_testplans(db):
tp.delete(db=db)
def create_instance(self, key):
return TestCatalog(self.env, key['id'])
class TestCase(AbstractTestDescription):
def __init__(self, env, id=None, page_name=None, title=None, description=None, db=None):
AbstractTestDescription.__init__(self, env, 'testcase', id, page_name, title, description, db)
def get_enclosing_catalog(self):
"""
Returns the catalog containing this test case.
"""
page_name = self.values['page_name']
cat_id = page_name.rpartition('TT')[2].rpartition('_')[0]
cat_page = page_name.rpartition('_TC')[0]
return TestCatalog(self.env, cat_id, cat_page)
def create_instance(self, key):
return TestCase(self.env, key['id'])
def move_to(self, tcat, db=None):
"""
Moves the test case into a different catalog.
Note: the test case keeps its ID, and the wiki page is moved
into the new name. This way, the page change history is kept.
"""
db, handle_ta = get_db_for_write(self.env, db)
# Rename the wiki page
new_page_name = tcat['page_name'] + '_TC' + self['id']
cursor = db.cursor()
cursor.execute("UPDATE wiki SET name = %s WHERE name = %s",
(new_page_name, self['page_name']))
if handle_ta:
db.commit()
# Invalidate Trac 0.12 page name cache
try:
del WikiSystem(self.env).pages
except:
pass
# TODO Move wiki page attachments
#from trac.attachment import Attachment
#Attachment.delete_all(self.env, 'wiki', self.name, db)
# Remove test case from all the plans
tcip_search = TestCaseInPlan(self.env)
tcip_search['id'] = self.values['id']
for tcip in tcip_search.list_matching_objects(db=db):
tcip.delete(db)
# Update self properties and save
self['page_name'] = new_page_name
self.wikipage = WikiPage(self.env, new_page_name)
self.save_changes('System', "Moved to a different catalog",
datetime.now(utc), db)
def get_related_tickets(self, db=None):
"""
Returns an iterator over the IDs of the ticket opened against
this test case.
"""
self.env.log.debug('>>> get_related_tickets')
if db is None:
db = get_db(self.env, db)
cursor = db.cursor()
cursor.execute("SELECT id FROM ticket WHERE id in " +
"(SELECT ticket FROM ticket_custom WHERE name='testcaseid' AND value=%s)",
(self.values['page_name'],))
for row in cursor:
self.env.log.debug(' ---> Found ticket %s' % row[0])
yield row[0]
self.env.log.debug('<<< get_related_tickets')
def post_delete(self, db):
"""
Deletes the test case from all plans and its status change
history.
"""
self.env.log.debug("Deleting the case case from all plans and its status change history")
cursor = db.cursor()
# Delete test cases in plan
cursor.execute('DELETE FROM testcaseinplan WHERE id = %s', (self['id'],))
# Delete test case status history
cursor.execute('DELETE FROM testcasehistory WHERE id = %s', (self['id'],))
class TestCaseInPlan(AbstractVariableFieldsObject):
"""
This object represents a test case in a test plan.
It keeps the latest test execution status (aka verdict).
The status, as far as this class is concerned, can be just any
string.
The plugin logic, anyway, currently recognizes only three hardcoded
statuses, but this can be evolved without need to modify also this
class.
The history of test execution status changes is instead currently
kept in another table, testcasehistory, which is not backed by any
python class.
This is a duplication, since the 'changes' table also keeps track
of status changes, so the testcasehistory table may be removed in
the future.
"""
# Fields that must not be modified directly by the user
protected_fields = ('id', 'planid', 'page_name', 'page_version', 'status')
def __init__(self, env, id=None, planid=None, page_name=None, page_version=-1, status=None, db=None):
"""
The test case in plan is related to a test case, the 'id' and
'page_name' arguments, and to a test plan, the 'planid'
argument.
"""
self.values = {}
self.values['id'] = id
self.values['planid'] = planid
self.values['page_name'] = page_name
self.values['page_version'] = page_version
self.values['status'] = status
key = self.build_key_object()
AbstractVariableFieldsObject.__init__(self, env, 'testcaseinplan', key, db)
def get_key_prop_names(self):
return ['id', 'planid']
def create_instance(self, key):
return TestCaseInPlan(self.env, key['id'], key['planid'])
def set_status(self, status, author, db=None):
"""
Sets the execution status of the test case in the test plan.
This method immediately writes into the test case history, but
does not write the new status into the database table for this
test case in plan.
You need to call 'save_changes' to achieve that.
"""
status = status.lower()
self['status'] = status
db, handle_ta = get_db_for_write(self.env, db)
cursor = db.cursor()
sql = 'INSERT INTO testcasehistory (id, planid, time, author, status) VALUES (%s, %s, %s, %s, %s)'
cursor.execute(sql, (self.values['id'], self.values['planid'], to_any_timestamp(datetime.now(utc)), author, status))
if handle_ta:
db.commit()
def list_history(self, db=None):
"""
Returns an ordered list of status changes, along with timestamp
and author, starting from the most recent.
"""
if db is None:
db = get_db(self.env, db)
cursor = db.cursor()
sql = "SELECT time, author, status FROM testcasehistory WHERE id=%s AND planid=%s ORDER BY time DESC"
cursor.execute(sql, (self.values['id'], self.values['planid']))
for ts, author, status in cursor:
yield ts, author, status.lower()
def get_related_tickets(self, db=None):
"""
Returns an iterator over the IDs of the ticket opened against
this test case and this test plan.
"""
self.env.log.debug('>>> get_related_tickets')
if db is None:
db = get_db(self.env, db)
cursor = db.cursor()
cursor.execute("SELECT id FROM ticket WHERE id in " +
"(SELECT ticket FROM ticket_custom WHERE name='testcaseid' AND value=%s) " +
"AND id in " +
"(SELECT ticket FROM ticket_custom WHERE name='planid' AND value=%s) ",
(self.values['page_name'], self.values['planid']))
for row in cursor:
self.env.log.debug(' ---> Found ticket %s' % row[0])
yield row[0]
self.env.log.debug('<<< get_related_tickets')
def update_version(self):
"""
Updates the wiki page version reference to be the latest available version.
"""
self.env.log.debug('>>> update_version')
wikipage = WikiPage(self.env, self.values['page_name'])
self['page_version'] = wikipage.version
self.env.log.debug('<<< update_version')
def delete_history(self, db=None):
"""
Deletes all entries in the testcasehistory related to this test case in plan
"""
self.env.log.debug('>>> delete_history')
db, handle_ta = get_db_for_write(self.env, db)
cursor = db.cursor()
# Delete test case status history
cursor.execute('DELETE FROM testcasehistory WHERE id = %s and planid = %s', (self['id'], self['planid']))
if handle_ta:
db.commit()
self.env.log.debug('<<< delete_history')
class TestPlan(AbstractVariableFieldsObject):
"""
A test plan represents a particular instance of test execution
for a test catalog.
You can create any number of test plans on any test catalog (or
sub-catalog).
A test plan is associated to a test catalog, and to every
test case in it, with the initial state equivalent to
"to be executed".
The association with test cases is achieved through the
TestCaseInPlan objects.
For optimization purposes, a TestCaseInPlan is created in the
database only as soon as its status is changed (i.e. from "to be
executed" to something else).
So you cannot always count on the fact that a TestCaseInPlan
actually exists for every test case in a catalog, when a particular
test plan has been created for it.
"""
# Fields that must not be modified directly by the user
protected_fields = ('id', 'catid', 'page_name', 'name', 'author', 'time', 'contains_all', 'freeze_tc_versions')
selected_tcs = []
def __init__(self, env, id=None, catid=None, page_name=None, name=None, author=None, contains_all=1, snapshot=0, selected_tcs=[], db=None):
"""
A test plan has an ID, generated at creation time and
independent on those for test catalogs and test cases.
It is associated to a test catalog, the 'catid' and 'page_name'
arguments.
It has a name and an author.
"""
self.values = {}
self.values['id'] = id
self.values['catid'] = catid
self.values['page_name'] = page_name
self.values['name'] = name
self.values['author'] = author
self.values['contains_all'] = contains_all
self.values['freeze_tc_versions'] = snapshot
self.selected_tcs = selected_tcs
key = self.build_key_object()
AbstractVariableFieldsObject.__init__(self, env, 'testplan', key, db)
def create_instance(self, key):
return TestPlan(self.env, key['id'])
def post_insert(self, db):
"""
If only some test cases must be in the plan, then create the
corresponding TestCaseInPlan objects and relate them to this plan.
"""
self.env.log.debug(">>> post_insert")
if not self.values['contains_all']:
# Create a TestCaseInPlan for each test case specified by the User
from testmanager.api import TestManagerSystem
default_status = TestManagerSystem(self.env).get_default_tc_status()
author = self.values['author']
for tc_page_name in self.selected_tcs:
if tc_page_name != '':
tc_id = tc_page_name.rpartition('TC')[2]
tcip = TestCaseInPlan(self.env, tc_id, self.values['id'])
if not tcip.exists:
tc = TestCase(self.env, tc_id)
tcip['page_name'] = tc['page_name']
if self.values['freeze_tc_versions']:
# Set the wiki page version to the current latest version
tcip['page_version'] = tc.wikipage.version
tcip.set_status(default_status, author)
tcip.insert()
elif self.values['freeze_tc_versions']:
# Create a TestCaseInPlan for each test case in the catalog, and
# set the wiki page version to the current latest version
self.env.log.debug(" - 1 -")
tcat = TestCatalog(self.env, self.values['catid'], self.values['page_name'])
from testmanager.api import TestManagerSystem
default_status = TestManagerSystem(self.env).get_default_tc_status()
author = self.values['author']
for tc in tcat.list_testcases(deep=True):
self.env.log.debug(" - 2 -")
tcip = TestCaseInPlan(self.env, tc.values['id'], self.values['id'])
if not tcip.exists:
tcip['page_name'] = tc['page_name']
tcip['page_version'] = tc.wikipage.version
tcip.set_status(default_status, author)
self.env.log.debug(" - 3 - %s %s", tcip['id'], tcip['page_name'])
tcip.insert()
self.env.log.debug("<<< post_insert")
def post_delete(self, db):
self.env.log.debug("Deleting this test plan %s" % self['id'])
# Remove all test cases (in plan) from this plan
#self.env.log.debug("Deleting all test cases in the plan...")
#tcip_search = TestCaseInPlan(self.env)
#tcip_search['planid'] = self.values['id']
#for tcip in tcip_search.list_matching_objects(db=db):
# self.env.log.debug("Deleting test case in plan, with id %s" % tcip['id'])
# tcip.delete(db)
cursor = db.cursor()
# Delete test cases in plan
cursor.execute('DELETE FROM testcaseinplan WHERE planid = %s', (self['id'],))
# Delete test case status history
cursor.execute('DELETE FROM testcasehistory WHERE planid = %s', (self['id'],))
def get_related_tickets(self, db):
pass
class TestManagerModelProvider(Component):
"""
This class provides the data model for the test management plugin.
The actual data model on the db is created starting from the
SCHEMA declaration below.
For each table, we specify whether to create also a '_custom' and
a '_change' table.
This class also provides the specification of the available fields
for each class, being them standard fields and the custom fields
specified in the trac.ini file.
The custom field specification follows the same syntax as for
Tickets.
Currently, only 'text' type of custom fields are supported.
"""
implements(IConcreteClassProvider, IEnvironmentSetupParticipant)
SCHEMA = {
'testmanager_templates':
{'table':
Table('testmanager_templates', key = ('id', 'name', 'type'))[
Column('id'),
Column('name'),
Column('type'),
Column('description'),
Column('content')],
'has_custom': False,
'has_change': False,
'version': 1},
'testconfig':
{'table':
Table('testconfig', key = ('propname'))[
Column('propname'),
Column('value')],
'has_custom': False,
'has_change': False,
'version': 1},
'testcatalog':
{'table':
Table('testcatalog', key = ('id'))[
Column('id'),
Column('page_name')],
'has_custom': True,
'has_change': True,
'version': 1},
'testcase':
{'table':
Table('testcase', key = ('id'))[
Column('id'),
Column('page_name')],
'has_custom': True,
'has_change': True,
'version': 1},
'testcaseinplan':
{'table':
Table('testcaseinplan', key = ('id', 'planid'))[
Column('id'),
Column('planid'),
Column('page_name'),
Column('page_version', type='int'),
Column('status')],
'has_custom': True,
'has_change': True,
'version': 2},
'testcasehistory':
{'table':
Table('testcasehistory', key = ('id', 'planid', 'time'))[
Column('id'),
Column('planid'),
Column('time', type=get_timestamp_db_type()),
Column('author'),
Column('status'),
Index(['id', 'planid', 'time'])],
'has_custom': False,
'has_change': False,
'version': 1},
'testplan':
{'table':
Table('testplan', key = ('id'))[
Column('id'),
Column('catid'),
Column('page_name'),
Column('name'),
Column('author'),
Column('time', type=get_timestamp_db_type()),
Column('contains_all', type='int'),
Column('freeze_tc_versions', type='int'),
Index(['id']),
Index(['catid'])],
'has_custom': True,
'has_change': True,
'version': 2}
}
FIELDS = {
'testcatalog': [
{'name': 'id', 'type': 'text', 'label': N_('ID')},
{'name': 'page_name', 'type': 'text', 'label': N_('Wiki page name')}
],
'testcase': [
{'name': 'id', 'type': 'text', 'label': N_('ID')},
{'name': 'page_name', 'type': 'text', 'label': N_('Wiki page name')}
],
'testcaseinplan': [
{'name': 'id', 'type': 'text', 'label': N_('ID')},
{'name': 'planid', 'type': 'text', 'label': N_('Plan ID')},
{'name': 'page_name', 'type': 'text', 'label': N_('Wiki page name')},
{'name': 'page_version', 'type': 'int', 'label': N_('Wiki page version')},
{'name': 'status', 'type': 'text', 'label': N_('Status')}
],
'testplan': [
{'name': 'id', 'type': 'text', 'label': N_('ID')},
{'name': 'catid', 'type': 'text', 'label': N_('Catalog ID')},
{'name': 'page_name', 'type': 'text', 'label': N_('Wiki page name')},
{'name': 'name', 'type': 'text', 'label': N_('Name')},
{'name': 'author', 'type': 'text', 'label': N_('Author')},
{'name': 'time', 'type': 'time', 'label': N_('Created')},
{'name': 'contains_all', 'type': 'int', 'label': N_('Contains all Test Cases')},
{'name': 'freeze_tc_versions', 'type': 'text', 'label': N_('Freeze Test Case versions')}
]
}
METADATA = {'testcatalog': {
'label': "Test Catalog",
'searchable': True,
'has_custom': True,
'has_change': True
},
'testcase': {
'label': "Test Case",
'searchable': True,
'has_custom': True,
'has_change': True
},
'testcaseinplan': {
'label': "Test Case in a Plan",
'searchable': True,
'has_custom': True,
'has_change': True
},
'testplan': {
'label': "Test Plan",
'searchable': True,
'has_custom': True,
'has_change': True
}
}
# IConcreteClassProvider methods
def get_realms(self):
yield 'testcatalog'
yield 'testcase'
yield 'testcaseinplan'
yield 'testplan'
def get_data_models(self):
return self.SCHEMA
def get_fields(self):
return copy.deepcopy(self.FIELDS)
def get_metadata(self):
return self.METADATA
def create_instance(self, realm, key=None):
self.env.log.debug(">>> create_instance - %s %s" % (realm, key))
obj = None
if realm == 'testcatalog':
if key is not None:
obj = TestCatalog(self.env, key['id'])
else:
obj = TestCatalog(self.env)
elif realm == 'testcase':
if key is not None:
obj = TestCase(self.env, key['id'])
else:
obj = TestCase(self.env)
elif realm == 'testcaseinplan':
if key is not None:
obj = TestCaseInPlan(self.env, key['id'], key['planid'])
else:
obj = TestCaseInPlan(self.env)
elif realm == 'testplan':
if key is not None:
obj = TestPlan(self.env, key['id'])
else:
obj = TestPlan(self.env)
self.env.log.debug("<<< create_instance")
return obj
def check_permission(self, req, realm, key_str=None, operation='set', name=None, value=None):
if 'TEST_VIEW' not in req.perm:
raise PermissionError('TEST_VIEW', realm)
if operation == 'set' and 'TEST_MODIFY' not in req.perm:
raise PermissionError('TEST_MODIFY', realm)
# IEnvironmentSetupParticipant methods
def environment_created(self):
self.upgrade_environment(get_db(self.env))
def environment_needs_upgrade(self, db):
if self._need_upgrade(db):
return True
for realm in self.SCHEMA:
realm_metadata = self.SCHEMA[realm]
if need_db_create_for_realm(self.env, realm, realm_metadata, db) or \
need_db_upgrade_for_realm(self.env, realm, realm_metadata, db):
return True
return False
def upgrade_environment(self, db):
# Create or update db
for realm in self.SCHEMA:
realm_metadata = self.SCHEMA[realm]
if need_db_create_for_realm(self.env, realm, realm_metadata, db):
create_db_for_realm(self.env, realm, realm_metadata, db)
elif need_db_upgrade_for_realm(self.env, realm, realm_metadata, db):
upgrade_db_for_realm(self.env, 'testmanager.upgrades', realm, realm_metadata, db)
# Create default values for configuration properties and initialize counters
db_insert_or_ignore(self.env, 'testconfig', 'NEXT_CATALOG_ID', '0')
db_insert_or_ignore(self.env, 'testconfig', 'NEXT_TESTCASE_ID', '0')
db_insert_or_ignore(self.env, 'testconfig', 'NEXT_PLAN_ID', '0')
# Create the basic "TC" Wiki page, used as the root test catalog
tc_page = WikiPage(self.env, 'TC')
if not tc_page.exists:
tc_page.text = ' '
tc_page.save('System', '', '127.0.0.1')
if self._need_upgrade(db):
# Set custom ticket field to hold related test case
custom = self.config['ticket-custom']
config_dirty = False
if 'testcaseid' not in custom:
custom.set('testcaseid', 'text')
custom.set('testcaseid.label', _("Test Case"))
config_dirty = True
if 'planid' not in custom:
custom.set('planid', 'text')
custom.set('planid.label', _("Test Plan"))
config_dirty = True
# Set config section for test case outcomes
if 'test-outcomes' not in self.config:
self.config.set('test-outcomes', 'green.SUCCESSFUL', _("Successful"))
self.config.set('test-outcomes', 'yellow.TO_BE_TESTED', _("Untested"))
self.config.set('test-outcomes', 'red.FAILED', _("Failed"))
self.config.set('test-outcomes', 'default', 'TO_BE_TESTED')
config_dirty = True
# Set config section for default visible columns in tabular view
if self.config.get('testmanager', 'testcatalog.visible_description') == '':
self.config.set('testmanager', 'testcatalog.visible_description', 'False')
config_dirty = True
if config_dirty:
self.config.save()
def _need_upgrade(self, db):
# Check for custom ticket field to hold related test case
custom = self.config['ticket-custom']
if 'testcaseid' not in custom or 'planid' not in custom:
return True
# Check for config section for test case outcomes
if 'test-outcomes' not in self.config:
return True
if 'testmanager' not in self.config:
return True
return False
| gpl-3.0 | 4,519,438,844,586,283,500 | 37.484513 | 208 | 0.538431 | false |
LamCiuLoeng/bbb | rpac/model/interface.py | 1 | 2192 | # -*- coding: utf-8 -*-
import json
from datetime import datetime as dt
from sqlalchemy import Column
from sqlalchemy.types import Integer, DateTime, Text
from sqlalchemy.sql.expression import and_, desc
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from tg import request
from rpac.model import qry
__all__ = ['SysMixin', ]
def getUserID():
    user_id = 1
    try:
        user_id = request.identity["user"].user_id
    except Exception:
        pass  # no authenticated request in scope; keep the default system user id
    return user_id
class SysMixin( object ):
remark = Column( 'remark', Text, doc = u'Remark' )
createTime = Column( 'create_time', DateTime, default = dt.now )
updateTime = Column( 'update_time', DateTime, default = dt.now )
createById = Column( 'create_by_id', Integer, default = getUserID )
updateById = Column( 'update_by_id', Integer, default = getUserID )
sysCreateTime = Column( 'system_create_time', DateTime, default = dt.now )
sysUpdateTime = Column( 'system_update_time', DateTime, default = dt.now, onupdate = dt.now )
    active = Column( 'active', Integer, default = 0 )  # 0 is active, 1 is inactive
@property
def createBy( self ):
from auth import User
return qry( User ).get( self.createById )
@property
def updateBy( self ):
from auth import User
return qry( User ).get( self.updateById )
    @property
    def approveBy( self ):
        from auth import User
        # approveById is expected to be declared by the concrete model that uses this mixin
        return qry( User ).get( self.approveById )
def _getAttachment( self ):
from logic import FileObject
ids = filter( bool, self._attachment.split( "|" ) )
if not ids : return []
return qry( FileObject ).filter( and_( FileObject.active == 0, FileObject.id.in_( ids ) ) ).order_by( FileObject.id )
def _setAttachment( self, v ):
ids = None
if v :
if type( v ) == list:
ids = "|".join( map( unicode, v ) )
elif isinstance( v, basestring ):
ids = v
self._attachment = ids
@declared_attr
def attachment( self ): return synonym( '_attachment', descriptor = property( self._getAttachment, self._setAttachment ) )
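# Illustrative use of the attachment synonym defined above (ids are
# arbitrary): assigning a list of FileObject ids serializes them into the
# pipe-delimited backing column, and reading resolves them back to rows:
#
#   obj.attachment = [3, 5, 8]          # stored as u'3|5|8'
#   for f in obj.attachment: ...        # yields active FileObject records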
| mit | -8,494,137,192,439,039,000 | 29.027397 | 126 | 0.626825 | false |
0111001101111010/cs595-f13 | assignment2/q3/carbondate-master/getFirstAppearanceInArchives.py | 1 | 7051 | import re
import time
import urllib2
import os
import sys
import datetime
import urllib
import simplejson
import calendar
import commands
import math
from datetime import datetime
def getMementos(uri):
uri = uri.replace(' ', '')
    originalExpression = re.compile( r"<http://[A-Za-z0-9.:=/%-_ ]*>; rel=\"original\"," )
mementoExpression = re.compile( r"<http://[A-Za-z0-9.:=/&,%-_ \?]*>;rel=\"(memento|first memento|last memento|first memento last memento|first last memento)\";datetime=\"(Sat|Sun|Mon|Tue|Wed|Thu|Fri), \d{2} (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) (19|20)\d\d \d\d:\d\d:\d\d GMT\"" )
zeroMementoExpression = re.compile(r"Resource: http://[A-Za-z0-9.:=/&,%-_ ]*")
baseURI = 'http://mementoproxy.cs.odu.edu/aggr/timemap/link/'
memento_list = []
try:
search_results = urllib.urlopen(baseURI+uri)
the_page = search_results.read()
timemapList = the_page.split('\n')
count = 0
for line in timemapList:
if count <= 1:
if line.find('Resource not in archive') > -1:
result = zeroMementoExpression.search( line )
count = count + 1
continue
elif count == 2:
                result = originalExpression.search( line )
if result:
originalResult = result.group(0)
originalUri = originalResult[1:len(originalResult)-17]
else:
if(line.find("</memento")>0):
line = line.replace("</memento", "<http://api.wayback.archive.org/memento")
loc = line.find('>;rel="')
tofind = ';datetime="'
loc2 = line.find(tofind)
if(loc!=-1 and loc2!=-1):
mementoURL = line[2:loc]
timestamp = line[loc2+len(tofind):line.find('"',loc2+len(tofind)+3)]
epoch = int(calendar.timegm(time.strptime(timestamp, '%a, %d %b %Y %H:%M:%S %Z')))
day_string = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(epoch))
uri = mementoURL
cdlib = 'webarchives.cdlib.org'
archiefweb = 'enterprise.archiefweb.eu'
webARchive= 'api.wayback.archive.org'
yahoo1 = 'uk.wrs.yahoo.com'
yahoo2 = 'rds.yahoo.com'
yahoo3 = 'wrs.yahoo.com'
diigo = 'www.diigo.com'
bing = 'cc.bingj.com'
wayback = 'wayback.archive-it.org'
webArchiveNationalUK = 'webarchive.nationalarchives.gov.uk'
webHarvest = 'webharvest.gov'
webArchiveOrgUK = 'www.webarchive.org.uk'
webCitation = 'webcitation.org'
mementoWayBack='memento.waybackmachine.org'
type = ''
category = ''
# @type uri str
if (uri.find(webARchive)!=-1):
type = 'Internet Archive'
category = 'IA'
elif (uri.find(yahoo1)!=-1 or uri.find(yahoo2)!=-1 or uri.find(yahoo3)!=-1):
type = 'Yahoo'
category = 'SE'
elif (uri.find(diigo)!=-1):
type = 'diigo'
category = 'Others'
elif (uri.find(bing)!=-1):
type = 'Bing'
category = 'SE'
elif (uri.find(wayback)!=-1):
type = 'Archive-It'
category = 'Others'
elif (uri.find(webArchiveNationalUK)!=-1):
type = 'UK National Archive'
category = 'Others'
elif (uri.find(webHarvest)!=-1):
type = 'Web Harvest'
category = 'Others'
elif (uri.find(webArchiveOrgUK)!=-1):
type = 'UK Web Archive'
category = 'Others'
elif (uri.find(webCitation)!=-1):
type = 'Web Citation'
category = 'Others'
elif (uri.find(cdlib)!=-1):
type = 'CD Lib'
category = 'Others'
elif (uri.find(archiefweb)!=-1):
type = 'ArchiefWeb'
category = 'Others'
elif (uri.find(mementoWayBack)!=-1):
type = 'Wayback Machine'
category = 'Others'
else:
type = 'Not Known'
category = 'Others'
memento = {}
memento["type"] = type
memento["category"] = category
memento["time"] = day_string
memento["link"] = mementoURL
memento["link"] = urllib.quote(memento["link"])
memento["link"] = memento["link"].replace("http%3A//", "http://")
memento["link"] = memento["link"][memento["link"].find("http://"):]
memento_list.append(memento)
else:
pass
count = count + 1
except urllib2.URLError:
pass
return memento_list
def isInPage(url,page):
co = 'curl -i --silent -L -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30" "'+page+'"'
page = commands.getoutput(co)
loc = page.find(url)
date = ""
if(loc==-1):
return False, date
to_find = "X-Archive-Orig-Last-modified: "
loc = page.find(to_find)
if(loc !=-1):
end = page.find("\r", loc)
date = page[loc+len(to_find):end]
date = date.strip()
if(date ==""):
to_find = "X-Archive-Orig-Date: "
loc = page.find(to_find)
if(loc !=-1):
end = page.find("\r", loc)
date = page[loc+len(to_find):end]
date = date.strip()
    if(date != ""):  # guard: strptime would raise ValueError on an empty string
        epoch = int(calendar.timegm(time.strptime(date, '%a, %d %b %Y %H:%M:%S %Z')))
        date = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(epoch))
    return True, date
def getFirstAppearance(url, inurl):
try:
mementos = getMementos(inurl)
if(len(mementos) == 0):
return ""
start = 0
end = len(mementos)
previous = -1
i = 0
foundbefore = False
count = 0
for mem in mementos:
res, date = isInPage(url,mem["link"])
if(res==True):
break
while(True):
res, date = isInPage(url,mementos[i]["link"])
if(res==True and i==0):
return date
if(int(math.fabs(previous-i))==0):
return ""
if( (res==True and int(math.fabs(previous-i))==1 and foundbefore == False) or (res==False and int(math.fabs(previous-i))==1 and foundbefore == True) ):
return date
previous = i
if(res == False):
start = i
i = (end-start)/2 + start
foundbefore = False
else:
end = i
i = (end-start)/2 + start
foundbefore = True
count = count + 1
except:
print sys.exc_info()
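# Sketch of the search above (illustrative): the bisection assumes that once
# the target URL appears on the archived page it stays present in all later
# mementos, so its first appearance is found in O(log n) memento fetches:
#
#   first_seen = getFirstAppearance("http://example.org/x", "http://example.org/")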
| mit | -4,108,445,885,518,813,000 | 32.259434 | 296 | 0.494823 | false |
domoran/dxlparser | dxlparser/test/Preprocessor_Test.py | 1 | 1393 | # -*- coding: utf-8 -*-
from dxlparser import DXLPreprocessor
def preprocess(data, expected):
result = DXLPreprocessor().preprocess(data)
if not result == expected:
print ("Expected: |" + expected + "|\nObserved: |" + result + "|\n")
assert result == expected
testdata = [
("", ""), # empty string
("Hallo \n", "Hallo \n"), # simple statement
("Hallo // single line Comment\nSecond Line", "Hallo \nSecond Line"), # normal single line comment
("Hallo // single line Comment-\nSecond Line", "Hallo Second Line"), # single line comment ending with - lf
("Hallo // single line Comment-\r\nSecond Line", "Hallo Second Line"), # single line comment ending with - cr lf
("Hallo // single line Comment- \r\nSecond Line", "Hallo \nSecond Line"), # single line comment with minus in middle
("Multi/*Line*/Comment", "MultiComment"), # multi line comment 1
("Multi/*Li/*\nne*/Comment", "MultiComment"), # multi line comment 2
("Multi\n/*\nne*/\r\nComment", "Multi\n\r\nComment"), # multi line comment 2
# real code test
("""
int c = 4 /* important */
string s = "some text" //-
"more text"
int d = 5;""",
"""
int c = 4
string s = "some text" "more text"
int d = 5;"""
),
]
def test_preprocessor():
for data, expected in testdata:
yield preprocess, data, expected
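# The cases above encode the comment conventions exercised by the
# preprocessor: '/* ... */' blocks are removed in place, a '//' comment is
# stripped up to the newline, and a '//' comment whose text ends in '-'
# also swallows the line break, splicing the next line on.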
| gpl-3.0 | -1,977,564,771,191,190,500 | 31.214286 | 120 | 0.601579 | false |
hhorak/rebase-helper | setup.py | 1 | 3477 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This tool helps you to rebase package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <[email protected]>
# Tomas Hozza <[email protected]>
from __future__ import print_function
import subprocess
import os
from rebasehelper.version import VERSION
try:
    from setuptools import setup, Command
except ImportError:
    from distutils.core import setup, Command
class PyTest(Command):
user_options = [('test-runner=',
't',
'test runner to use; by default, multiple py.test runners are tried')]
command_consumes_arguments = True
def initialize_options(self):
self.test_runner = None
self.args = []
def finalize_options(self):
pass
def runner_exists(self, runner):
syspaths = os.getenv('PATH').split(os.pathsep)
for p in syspaths:
if os.path.exists(os.path.join(p, runner)):
return True
return False
    def run(self):
        # try all supported py.test runners found on PATH, unless one was
        # given explicitly via --test-runner
        supported = ['2.7', '3.3']
        potential_runners = ['py.test-' + s for s in supported]
        if self.test_runner:
            potential_runners = [self.test_runner]
        runners = [pr for pr in potential_runners if self.runner_exists(pr)]
        retcode = 0  # defined up front so it exists even if no runner was found
        for runner in runners:
            if len(runners) > 1:
                print('\n' * 2)
                print('Running tests using "{0}":'.format(runner))
            cmd = [runner]
            for a in self.args:
                cmd.append(a)
            cmd.append('-v')
            cmd.append('test')
            t = subprocess.Popen(cmd)
            t.wait()
            retcode = t.returncode or retcode
        raise SystemExit(retcode)
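# Example (illustrative) invocations of the test command defined above:
#
#   python setup.py test                          # tries py.test-2.7 and py.test-3.3
#   python setup.py test --test-runner=py.test-2.7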
setup(
name='rebasehelper',
version=VERSION,
description='RebaseHelper helps you to rebase your packages.',
keywords='packages,easy,quick',
author='Petr Hracek',
author_email='[email protected]',
url='https://github.com/phracek/rebase-helper',
license='GPLv2+',
packages=['rebasehelper'],
include_package_data=True,
entry_points={'console_scripts': ['rebase-helper=rebasehelper.cli:CliHelper.run']},
setup_requires=[],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development',
],
cmdclass={'test': PyTest}
)
| gpl-2.0 | 490,362,676,183,140,000 | 32.432692 | 96 | 0.614323 | false |
ncc-unesp/goo-server | docs/home/conf.py | 1 | 5772 | # -*- coding: utf-8 -*-
#
# goo-server api documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 19 10:15:12 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinxcontrib.httpdomain']
DOC_SOURCE_PATH = os.path.realpath(os.path.dirname(__file__))
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
testlevel = 2
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Goo Documentation'
copyright = u'2012, NCC - UNESP'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap-home'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../themes/"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'goo-docs'
| gpl-3.0 | -7,032,880,810,451,925,000 | 32.754386 | 82 | 0.721414 | false |
FMNSSun/PyPLUSPacket | pluspacket/packet.py | 1 | 8518 | import struct
_fmt_u64 = ">Q"
_fmt_u32 = ">L"
_magic_shift = 4
_flags_mask = 0x0F
_default_magic = 0xd8007ff
_min_packet_len = 20
_l_mask = 0x08
_r_mask = 0x04
_s_mask = 0x02
_x_mask = 0x01
_cat_pos = (4, 12)
_psn_pos = (12, 16)
_pse_pos = (16, 20)
_magic_pos = (0, 4)
_udp_header_len = 8
_pcf_type_plus_payload = 0xFF
PCF_INTEGRITY_FULL = 0x03
PCF_INTEGRITY_HALF = 0x02
PCF_INTEGRITY_QUARTER = 0x01
PCF_INTEGRITY_ZERO = 0x00
def _get_u32(s):
"""
Returns s -> u32
"""
return struct.unpack(_fmt_u32, s)[0]
def _get_u64(s):
"""
Returns s -> u64
"""
return struct.unpack(_fmt_u64, s)[0]
def _put_u64(i, buf):
    """
    Appends an u64 to buf. buf must be a mutable buffer such as a
    bytearray: with immutable bytes, += merely rebinds the local name
    and the caller sees no change.
    """
    buf += struct.pack(_fmt_u64, i)
def _put_u32(i, buf):
    """
    Appends an u32 to buf. The same bytearray caveat as for _put_u64 applies.
    """
    buf += struct.pack(_fmt_u32, i)
def get_psn(buf):
"""
Extracts PSN out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u32(buf[_psn_pos[0] : _psn_pos[1]])
def get_pse(buf):
"""
Extracts PSE out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u32(buf[_pse_pos[0] : _pse_pos[1]])
def get_cat(buf):
"""
Extracts CAT out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u64(buf[_cat_pos[0] : _cat_pos[1]])
def get_magic(buf):
"""
Extracts Magic out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u32(buf[_magic_pos[0] : _magic_pos[1]]) >> _magic_shift
def get_flags(buf):
"""
Returns the flags as ORed bits.
"""
return _get_u32(buf[_magic_pos[0] : _magic_pos[1]]) & _flags_mask
def get_l(buf):
"""
Returns True if L is set, otherwise False
"""
return bool(get_flags(buf) & _l_mask)
def get_r(buf):
"""
Returns True if R is set, otherwise False
"""
return bool(get_flags(buf) & _r_mask)
def get_s(buf):
"""
Returns True if S is set, otherwise False
"""
return bool(get_flags(buf) & _s_mask)
def get_x(buf):
"""
Returns True if X is set, otherwise False
"""
return bool(get_flags(buf) & _x_mask)
def is_extended_packet(buf):
"""
Just an alias for get_x.
"""
return get_x(buf)
def parse_packet(buf):
"""
Parses a packet completely. This is a wrapper for the from_bytes method
of the Packet class.
"""
return Packet().from_bytes(buf)
def detect_plus_in_udp(buf):
"""
Tries to detect the presence of a PLUS header in UDP (incl. header)
"""
if len(buf) < _udp_header_len:
raise ValueError("Buffer too small. UDP header is at least 8 bytes long.")
udp_payload = buf[_udp_header_len:]
return detect_plus(udp_payload)
def detect_plus(buf):
"""
Tries to detect the presence of a PLUS header in payload (excl. UDP header)
"""
if len(buf) < _min_packet_len:
# Technically the magic value could be present here but if the packet
# is this small then there can't be a complete basic header present and
# this is best counted as 'not plus'.
return False
magic = get_magic(buf)
return magic == _default_magic
def _any(xs):
	# Same semantics as the built-in any(); kept as a tiny local helper.
	for x in xs:
		if x:
			return True
	return False
def new_basic_packet(l, r, s, cat, psn, pse, payload):
"""
Creates a new packet with a basic header.
"""
p = Packet()
p.l = l
p.r = r
p.s = s
p.cat = cat
p.psn = psn
p.pse = pse
p.payload = payload
p.x = False
if not p.is_valid():
raise ValueError("Illegal combination of arguments!")
return p
def new_extended_packet(l, r, s, cat, psn, pse, pcf_type, pcf_integrity, pcf_value, payload):
"""
Creates a new packet with an extended header.
"""
p = new_basic_packet(l, r, s, cat, psn, pse, payload)
p.x = True
	if pcf_value is None:
		p.pcf_len = None
	else:
		p.pcf_len = len(pcf_value)
p.pcf_type = pcf_type
p.pcf_integrity = pcf_integrity
p.pcf_value = pcf_value
if not p.is_valid():
raise ValueError("Illegal combination of arguments!")
return p
class Packet():
def __init__(self):
"""
Creates a zero packet.
"""
# Initialize all the fields to None
self.psn = None
self.pse = None
self.cat = None
self.pcf_integrity = None
self.pcf_value = None
self.pcf_len = None
self.pcf_type = None
self.l = None
self.r = None
self.s = None
self.x = None
self.payload = None
self.magic = _default_magic
def to_dict(self):
return {
"psn" : self.psn,
"pse" : self.pse,
"cat" : self.cat,
"pcf_integrity" : self.pcf_integrity,
"pcf_value" : self.pcf_value,
"pcf_type" : self.pcf_type,
"l" : self.l,
"r" : self.r,
"s" : self.s,
"x" : self.x,
"magic" : self.magic,
"payload" : self.payload
}
def is_valid(self):
"""
Returns true if the packet's attributes/fields are in a valid state.
"""
		if _any([self.psn is None, self.pse is None,
		         self.cat is None, self.magic is None,
		         self.l is None, self.r is None,
		         self.s is None, self.x is None]):
			return False
		if not self.x:
			return True
		if self.pcf_type is None:
			return False
		if self.pcf_type == 0x00:
			return False
		if self.pcf_type == _pcf_type_plus_payload:
			# PCF type 0xFF carries no pcf_integrity/pcf_len/pcf_value fields.
			if _any([self.pcf_integrity is not None,
			         self.pcf_len is not None,
			         self.pcf_value is not None]):
				return False
			return True
		if _any([self.pcf_integrity is None,
		         self.pcf_len is None,
		         self.pcf_value is None]):
			return False
		if self.pcf_len != len(self.pcf_value):
			return False
		if self.pcf_len > 63:
			return False
		if self.pcf_integrity < 0 or self.pcf_integrity > 3:
			return False
		return True
	def from_bytes(self, buf):
		"""
		Parses a packet from bytes. This function does not set PCF Integrity
		to zero if PCF Len is zero. If you want that behaviour, as mentioned
		in the PLUS spec, you must apply it yourself.
		"""
		if len(buf) < _min_packet_len:
			raise ValueError("Minimum length of a PLUS packet is 20 bytes.")
		magic_and_flags = _get_u32(buf[_magic_pos[0] : _magic_pos[1]])
		magic = magic_and_flags >> _magic_shift
		if magic != _default_magic:
			raise ValueError("Invalid Magic value.")
		self.magic = magic
		flags = magic_and_flags & _flags_mask
		self.l = bool(flags & _l_mask)
		self.r = bool(flags & _r_mask)
		self.s = bool(flags & _s_mask)
		self.x = bool(flags & _x_mask)
		self.cat = _get_u64(buf[_cat_pos[0] : _cat_pos[1]])
		self.psn = _get_u32(buf[_psn_pos[0] : _psn_pos[1]])
		self.pse = _get_u32(buf[_pse_pos[0] : _pse_pos[1]])
		if not self.x:
			self.payload = buf[_min_packet_len:]
		else:
			self._extended(buf[_min_packet_len:])
		return self
def _extended(self, buf):
"""
Internal. Continues parsing extended headers.
"""
if len(buf) < 1:
raise ValueError("Extended header must have PCF_TYPE")
pcf_type = buf[0]
if pcf_type == 0xFF:
			# This means no pcf_integrity, pcf_len or pcf_value is present.
self.payload = buf[1:]
self.pcf_type = pcf_type
else:
if pcf_type == 0x00:
# One additional pcf_type byte
buf = buf[1:]
if len(buf) == 0:
raise ValueError("Missing additional PCF_TYPE byte")
pcf_type = buf[0] << 8
buf = buf[1:]
if len(buf) == 0:
raise ValueError("Missing PCF_LEN and PCF_INTEGRITY")
pcf_leni = buf[0]
pcf_len = pcf_leni >> 2
pcf_integrity = pcf_leni & 0x03
buf = buf[1:]
if len(buf) < pcf_len:
raise ValueError("Incomplete PCF_VALUE")
pcf_value = buf[:pcf_len]
payload = buf[pcf_len:]
self.pcf_len = pcf_len
self.pcf_integrity = pcf_integrity
self.pcf_value = pcf_value
self.payload = payload
self.pcf_type = pcf_type
def to_bytes(self):
"""
Unparses the packet to bytes.
"""
if not self.is_valid():
raise ValueError("Internal state is not valid!")
buf = bytearray()
		magic_and_flags = self.magic << _magic_shift
		if self.l: magic_and_flags |= _l_mask
		if self.r: magic_and_flags |= _r_mask
		if self.s: magic_and_flags |= _s_mask
		if self.x: magic_and_flags |= _x_mask
		_put_u32(magic_and_flags, buf)
_put_u64(self.cat, buf)
_put_u32(self.psn, buf)
_put_u32(self.pse, buf)
if not self.x:
buf += self.payload
return buf
if self.pcf_type == 0xFF:
buf.append(0xFF)
buf += self.payload
return buf
if self.pcf_type & 0x00FF == 0:
pcf_type = self.pcf_type >> 8
buf.append(0x00)
buf.append(pcf_type)
else:
buf.append(self.pcf_type)
buf.append(self.pcf_len << 2 | self.pcf_integrity)
buf += self.pcf_value
buf += self.payload
return buf
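
# --- Illustrative usage sketch (added example; not part of the original API) ---
# Round-trips a basic packet through to_bytes()/parse_packet() and checks header
# detection; the field values (cat=42, psn=1, ...) are arbitrary.
if __name__ == "__main__":
	pkt = new_basic_packet(True, False, True, cat=42, psn=1, pse=0, payload=b"hello")
	raw = pkt.to_bytes()
	assert detect_plus(raw)
	parsed = parse_packet(bytes(raw))
	assert parsed.cat == 42 and parsed.payload == b"hello"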
| bsd-2-clause | 4,748,410,429,267,642,000 | 17.679825 | 93 | 0.627495 | false |
Intel-Corporation/tensorflow | tensorflow/examples/saved_model/integration_tests/saved_model_test.py | 1 | 3365 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel integration tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
class SavedModelTest(tf.test.TestCase):
def assertCommandSucceeded(self, script_name, **flags):
"""Runs a test script via run_script."""
run_script = resource_loader.get_path_to_datafile("run_script")
command_parts = [run_script]
for flag_key, flag_value in flags.items():
command_parts.append("--%s=%s" % (flag_key, flag_value))
env = dict(TF2_BEHAVIOR="enabled", SCRIPT_NAME=script_name)
logging.info("Running: %s with environment flags %s" % (command_parts, env))
subprocess.check_call(command_parts, env=dict(os.environ, **env))
def test_text_rnn(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded("export_text_rnn_model", export_dir=export_dir)
self.assertCommandSucceeded("use_text_rnn_model", model_dir=export_dir)
def test_rnn_cell(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded("export_rnn_cell", export_dir=export_dir)
self.assertCommandSucceeded("use_rnn_cell", model_dir=export_dir)
def test_text_embedding_in_sequential_keras(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded(
"export_simple_text_embedding", export_dir=export_dir)
self.assertCommandSucceeded(
"use_model_in_sequential_keras", model_dir=export_dir)
def test_text_embedding_in_dataset(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded(
"export_simple_text_embedding", export_dir=export_dir)
self.assertCommandSucceeded(
"use_text_embedding_in_dataset", model_dir=export_dir)
def test_mnist_cnn(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded(
"export_mnist_cnn", export_dir=export_dir, fast_test_mode="true")
self.assertCommandSucceeded(
"use_mnist_cnn", export_dir=export_dir, fast_test_mode="true")
def test_mnist_cnn_with_mirrored_strategy(self):
self.skipTest(
"b/129134185 - saved model and distribution strategy integration")
export_dir = self.get_temp_dir()
self.assertCommandSucceeded(
"export_mnist_cnn",
export_dir=export_dir,
fast_test_mode="true")
self.assertCommandSucceeded(
"use_mnist_cnn",
export_dir=export_dir,
fast_test_mode="true",
use_mirrored_strategy=True,
)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 7,018,668,818,503,632,000 | 36.808989 | 80 | 0.69153 | false |
markrwilliams/pydivsufsort | setup.py | 1 | 1025 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
from os.path import exists
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_py import build_py as _build_py
from Cython.Distutils import build_ext
class build_py(_build_py):
    """Builds the bundled libdivsufsort static library, then the Python sources."""

    def run(self):
        if exists('lib/Makefile'):
            make_clean = subprocess.Popen(['make', 'distclean'], cwd='lib/')
            make_clean.wait()
        configure = subprocess.Popen(['./configure', '--with-pic'], cwd='lib/')
        configure.wait()
        make = subprocess.Popen(['make', '-j'], cwd='lib/')
        make.wait()
        _build_py.run(self)
setup(
cmdclass={'build_py': build_py,
'build_ext': build_ext},
name='divsufsort',
ext_modules=[
Extension(
"suffixarray",
sources=['src/suffix.pyx'],
libraries=['lib/lib/.libs/libdivsufsort.a'],
extra_objects=['lib/lib/.libs/libdivsufsort.a'],
include_dirs=['lib/include'],
language="c")])
| mit | 5,025,191,733,688,460,000 | 29.147059 | 72 | 0.590244 | false |
googleapis/python-websecurityscanner | google/cloud/websecurityscanner_v1/types/scan_config.py | 1 | 9809 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.websecurityscanner.v1", manifest={"ScanConfig",},
)
class ScanConfig(proto.Message):
r"""A ScanConfig resource contains the configurations to launch a
scan.
Attributes:
name (str):
The resource name of the ScanConfig. The name
follows the format of
'projects/{projectId}/scanConfigs/{scanConfigId}'.
The ScanConfig IDs are generated by the system.
display_name (str):
Required. The user provided display name of
the ScanConfig.
max_qps (int):
The maximum QPS during scanning. A valid value ranges from 5
            to 20 inclusively. If the field is unspecified or its value
            is set to 0, the server will default to 15. Other values
            outside of the [5, 20] range will be rejected with an
            INVALID_ARGUMENT error.
starting_urls (Sequence[str]):
Required. The starting URLs from which the
scanner finds site pages.
authentication (google.cloud.websecurityscanner_v1.types.ScanConfig.Authentication):
The authentication configuration. If
specified, service will use the authentication
configuration during scanning.
user_agent (google.cloud.websecurityscanner_v1.types.ScanConfig.UserAgent):
The user agent used during scanning.
blacklist_patterns (Sequence[str]):
The excluded URL patterns as described in
https://cloud.google.com/security-command-center/docs/how-to-use-web-security-scanner#excluding_urls
schedule (google.cloud.websecurityscanner_v1.types.ScanConfig.Schedule):
The schedule of the ScanConfig.
export_to_security_command_center (google.cloud.websecurityscanner_v1.types.ScanConfig.ExportToSecurityCommandCenter):
Controls export of scan configurations and
results to Security Command Center.
risk_level (google.cloud.websecurityscanner_v1.types.ScanConfig.RiskLevel):
The risk level selected for the scan
managed_scan (bool):
Whether the scan config is managed by Web
Security Scanner, output only.
static_ip_scan (bool):
Whether the scan configuration has enabled
static IP address scan feature. If enabled, the
scanner will access applications from static IP
addresses.
"""
class UserAgent(proto.Enum):
r"""Type of user agents used for scanning."""
USER_AGENT_UNSPECIFIED = 0
CHROME_LINUX = 1
CHROME_ANDROID = 2
SAFARI_IPHONE = 3
class RiskLevel(proto.Enum):
r"""Scan risk levels supported by Web Security Scanner. LOW
impact scanning will minimize requests with the potential to
modify data. To achieve the maximum scan coverage, NORMAL risk
level is recommended.
"""
RISK_LEVEL_UNSPECIFIED = 0
NORMAL = 1
LOW = 2
class ExportToSecurityCommandCenter(proto.Enum):
r"""Controls export of scan configurations and results to
Security Command Center.
"""
EXPORT_TO_SECURITY_COMMAND_CENTER_UNSPECIFIED = 0
ENABLED = 1
DISABLED = 2
class Authentication(proto.Message):
r"""Scan authentication configuration.
Attributes:
google_account (google.cloud.websecurityscanner_v1.types.ScanConfig.Authentication.GoogleAccount):
Authentication using a Google account.
custom_account (google.cloud.websecurityscanner_v1.types.ScanConfig.Authentication.CustomAccount):
Authentication using a custom account.
iap_credential (google.cloud.websecurityscanner_v1.types.ScanConfig.Authentication.IapCredential):
Authentication using Identity-Aware-Proxy
(IAP).
"""
class GoogleAccount(proto.Message):
r"""Describes authentication configuration that uses a Google
account.
Attributes:
username (str):
Required. The user name of the Google
account.
password (str):
Required. Input only. The password of the
Google account. The credential is stored
encrypted and not returned in any response nor
included in audit logs.
"""
username = proto.Field(proto.STRING, number=1,)
password = proto.Field(proto.STRING, number=2,)
class CustomAccount(proto.Message):
r"""Describes authentication configuration that uses a custom
account.
Attributes:
username (str):
Required. The user name of the custom
account.
password (str):
Required. Input only. The password of the
custom account. The credential is stored
encrypted and not returned in any response nor
included in audit logs.
login_url (str):
Required. The login form URL of the website.
"""
username = proto.Field(proto.STRING, number=1,)
password = proto.Field(proto.STRING, number=2,)
login_url = proto.Field(proto.STRING, number=3,)
class IapCredential(proto.Message):
r"""Describes authentication configuration for Identity-Aware-
roxy (IAP).
Attributes:
iap_test_service_account_info (google.cloud.websecurityscanner_v1.types.ScanConfig.Authentication.IapCredential.IapTestServiceAccountInfo):
                    Authentication configuration when the Web
                    Security Scanner service account is added in
                    Identity-Aware-Proxy (IAP) access policies.
"""
class IapTestServiceAccountInfo(proto.Message):
r"""Describes authentication configuration when Web-Security-
canner service account is added in Identity-Aware-Proxy (IAP)
access policies.
Attributes:
target_audience_client_id (str):
Required. Describes OAuth2 client id of
resources protected by Identity-Aware-Proxy
(IAP).
"""
target_audience_client_id = proto.Field(proto.STRING, number=1,)
iap_test_service_account_info = proto.Field(
proto.MESSAGE,
number=1,
oneof="iap_credentials",
message="ScanConfig.Authentication.IapCredential.IapTestServiceAccountInfo",
)
google_account = proto.Field(
proto.MESSAGE,
number=1,
oneof="authentication",
message="ScanConfig.Authentication.GoogleAccount",
)
custom_account = proto.Field(
proto.MESSAGE,
number=2,
oneof="authentication",
message="ScanConfig.Authentication.CustomAccount",
)
iap_credential = proto.Field(
proto.MESSAGE,
number=4,
oneof="authentication",
message="ScanConfig.Authentication.IapCredential",
)
class Schedule(proto.Message):
r"""Scan schedule configuration.
Attributes:
schedule_time (google.protobuf.timestamp_pb2.Timestamp):
A timestamp indicates when the next run will
be scheduled. The value is refreshed by the
server after each run. If unspecified, it will
default to current server time, which means the
scan will be scheduled to start immediately.
interval_duration_days (int):
Required. The duration of time between
executions in days.
"""
schedule_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
interval_duration_days = proto.Field(proto.INT32, number=2,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
max_qps = proto.Field(proto.INT32, number=3,)
starting_urls = proto.RepeatedField(proto.STRING, number=4,)
authentication = proto.Field(proto.MESSAGE, number=5, message=Authentication,)
user_agent = proto.Field(proto.ENUM, number=6, enum=UserAgent,)
blacklist_patterns = proto.RepeatedField(proto.STRING, number=7,)
schedule = proto.Field(proto.MESSAGE, number=8, message=Schedule,)
export_to_security_command_center = proto.Field(
proto.ENUM, number=10, enum=ExportToSecurityCommandCenter,
)
risk_level = proto.Field(proto.ENUM, number=12, enum=RiskLevel,)
managed_scan = proto.Field(proto.BOOL, number=13,)
static_ip_scan = proto.Field(proto.BOOL, number=14,)
__all__ = tuple(sorted(__protobuf__.manifest))
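
# --- Illustrative usage sketch (added example; not part of the generated module) ---
# proto-plus message classes accept their fields as keyword arguments; the values
# below are arbitrary examples rather than API defaults.
if __name__ == "__main__":
    example = ScanConfig(
        display_name="example-scan",
        starting_urls=["https://example.com"],
        max_qps=10,
        user_agent=ScanConfig.UserAgent.CHROME_LINUX,
        risk_level=ScanConfig.RiskLevel.LOW,
    )
    print(example.display_name, example.risk_level)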
| apache-2.0 | -5,182,692,759,453,523,000 | 40.740426 | 155 | 0.619941 | false |
akintolga/superdesk-core | apps/publish/content/tests.py | 1 | 45700 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from bson.objectid import ObjectId
from copy import copy
from datetime import timedelta
import os
import json
from eve.utils import config, ParsedRequest
from eve.versioning import versioned_id_field
from apps.packages.package_service import PackageService
from apps.publish.content.publish import ArchivePublishService
from superdesk.publish.subscribers import SUBSCRIBER_TYPES
from apps.validators import ValidatorsPopulateCommand
from superdesk.metadata.packages import RESIDREF
from test_factory import SuperdeskTestCase
from superdesk.publish import init_app, publish_queue
from superdesk.utc import utcnow
from superdesk import get_resource_service
import superdesk
from apps.archive.archive import SOURCE as ARCHIVE
from superdesk.metadata.item import TAKES_PACKAGE, PACKAGE_TYPE, ITEM_STATE, CONTENT_STATE, ITEM_TYPE, CONTENT_TYPE
from apps.publish.published_item import LAST_PUBLISHED_VERSION
from unittest import mock
from unittest.mock import MagicMock, patch
from apps.publish.enqueue.enqueue_service import EnqueueService
from apps.publish.enqueue.enqueue_published import EnqueuePublishedService
from apps.publish.enqueue import enqueue_published
from superdesk.media.crop import CropService
ARCHIVE_PUBLISH = 'archive_publish'
ARCHIVE_CORRECT = 'archive_correct'
ARCHIVE_KILL = 'archive_kill'
PUBLISH_QUEUE = 'publish_queue'
PUBLISHED = 'published'
@mock.patch('superdesk.publish.subscribers.SubscribersService.generate_sequence_number', lambda self, subscriber: 1)
class ArchivePublishTestCase(SuperdeskTestCase):
def init_data(self):
self.users = [{'_id': '1', 'username': 'admin'}]
self.desks = [{'_id': ObjectId('123456789ABCDEF123456789'), 'name': 'desk1'}]
self.products = [{"_id": "1", "name": "prod1"},
{"_id": "2", "name": "prod2", "codes": "abc,def"},
{"_id": "3", "name": "prod3", "codes": "xyz"}]
self.subscribers = [{"_id": "1", "name": "sub1", "is_active": True, "subscriber_type": SUBSCRIBER_TYPES.WIRE,
"media_type": "media", "sequence_num_settings": {"max": 10, "min": 1},
"email": "[email protected]",
"products": ["1"],
"destinations": [{"name": "dest1", "format": "nitf",
"delivery_type": "ftp",
"config": {"address": "127.0.0.1", "username": "test"}
}]
},
{"_id": "2", "name": "sub2", "is_active": True, "subscriber_type": SUBSCRIBER_TYPES.WIRE,
"media_type": "media", "sequence_num_settings": {"max": 10, "min": 1},
"email": "[email protected]",
"products": ["1"],
"destinations": [{"name": "dest2", "format": "AAP ANPA", "delivery_type": "filecopy",
"config": {"address": "/share/copy"}
},
{"name": "dest3", "format": "AAP ANPA", "delivery_type": "Email",
"config": {"recipients": "[email protected]"}
}]
},
{"_id": "3", "name": "sub3", "is_active": True, "subscriber_type": SUBSCRIBER_TYPES.DIGITAL,
"media_type": "media", "sequence_num_settings": {"max": 10, "min": 1},
"email": "[email protected]",
"products": ["1"],
"destinations": [{"name": "dest1", "format": "nitf",
"delivery_type": "ftp",
"config": {"address": "127.0.0.1", "username": "test"}
}]
},
{"_id": "4", "name": "sub4", "is_active": True, "subscriber_type": SUBSCRIBER_TYPES.WIRE,
"media_type": "media", "sequence_num_settings": {"max": 10, "min": 1},
"geo_restrictions": "New South Wales", "email": "[email protected]",
"products": ["1"],
"destinations": [{"name": "dest1", "format": "nitf",
"delivery_type": "ftp",
"config": {"address": "127.0.0.1", "username": "test"}
}]
},
{"_id": "5", "name": "sub5", "is_active": True, "subscriber_type": SUBSCRIBER_TYPES.ALL,
"media_type": "media", "sequence_num_settings": {"max": 10, "min": 1},
"email": "[email protected]",
"products": ["1", "2"],
"destinations": [{"name": "dest1", "format": "ninjs",
"delivery_type": "ftp",
"config": {"address": "127.0.0.1", "username": "test"}
}]
}]
self.articles = [{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4f9',
'_id': '1',
ITEM_TYPE: CONTENT_TYPE.TEXT,
'last_version': 3,
config.VERSION: 4,
'body_html': 'Test body',
'anpa_category': [{'qcode': 'A', 'name': 'Sport'}],
'urgency': 4,
'headline': 'Two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.PUBLISHED,
'expiry': utcnow() + timedelta(minutes=20),
'slugline': 'story slugline',
'unique_name': '#1'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a974-xy4532fe33f9',
'_id': '2',
'last_version': 3,
config.VERSION: 4,
'body_html': 'Test body of the second article',
'slugline': 'story slugline',
'urgency': 4,
'anpa_category': [{'qcode': 'A', 'name': 'Sport'}],
'headline': 'Another two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'expiry': utcnow() + timedelta(minutes=20),
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.PROGRESS,
'publish_schedule': "2016-05-30T10:00:00+0000",
ITEM_TYPE: CONTENT_TYPE.TEXT,
'unique_name': '#2'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4fa',
'_id': '3',
'last_version': 3,
config.VERSION: 4,
'body_html': 'Test body',
'slugline': 'story slugline',
'urgency': 4,
'anpa_category': [{'qcode': 'A', 'name': 'Sport'}],
'headline': 'Two students missing killed',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article killed',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.KILLED,
'expiry': utcnow() + timedelta(minutes=20),
ITEM_TYPE: CONTENT_TYPE.TEXT,
'unique_name': '#3'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4fb',
'_id': '4',
'last_version': 3,
config.VERSION: 4,
'body_html': 'Take-1 body',
'urgency': 4,
'headline': 'Take-1 headline',
'abstract': 'Abstract for take-1',
'anpa_category': [{'qcode': 'A', 'name': 'Sport'}],
'pubstatus': 'done',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'dateline': {'located': {'city': 'Sydney'}},
'slugline': 'taking takes',
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.PROGRESS,
'expiry': utcnow() + timedelta(minutes=20),
ITEM_TYPE: CONTENT_TYPE.TEXT,
'linked_in_packages': [{"package": "7", "package_type": "takes"}],
'unique_name': '#4'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4fg',
'_id': '5',
'last_version': 3,
config.VERSION: 4,
'body_html': 'Take-2 body',
'urgency': 4,
'headline': 'Take-2 headline',
'abstract': 'Abstract for take-1',
'anpa_category': [{'qcode': 'A', 'name': 'Sport'}],
'pubstatus': 'done',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'dateline': {'located': {'city': 'Sydney'}},
'slugline': 'taking takes',
'linked_in_packages': [{"package": "7", "package_type": "takes"}],
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.PROGRESS,
'expiry': utcnow() + timedelta(minutes=20),
ITEM_TYPE: CONTENT_TYPE.TEXT,
'unique_name': '#5'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4fc',
'_id': '6',
'last_version': 2,
config.VERSION: 3,
ITEM_TYPE: CONTENT_TYPE.COMPOSITE,
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
'groups': [{'id': 'root', 'refs': [{'idRef': 'main'}], 'role': 'grpRole:NEP'},
{
'id': 'main',
'refs': [
{
'location': ARCHIVE,
'guid': '5',
ITEM_TYPE: CONTENT_TYPE.TEXT,
RESIDREF: '5'
},
{
'location': ARCHIVE,
'guid': '4',
ITEM_TYPE: CONTENT_TYPE.TEXT,
RESIDREF: '4'
}
],
'role': 'grpRole:main'}],
'firstcreated': utcnow(),
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#6',
ITEM_STATE: CONTENT_STATE.PROGRESS},
{'guid': 'tag:localhost:2015:ab-69b961-2816-4b8a-a584-a7b402fed4fc',
'_id': '7',
'last_version': 2,
config.VERSION: 3,
ITEM_TYPE: CONTENT_TYPE.COMPOSITE,
'package_type': 'takes',
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
'groups': [{'id': 'root', 'refs': [{'idRef': 'main'}], 'role': 'grpRole:NEP'},
{
'id': 'main',
'refs': [
{
'location': ARCHIVE,
'guid': '5',
'sequence': 1,
ITEM_TYPE: CONTENT_TYPE.TEXT
},
{
'location': ARCHIVE,
'guid': '4',
'sequence': 2,
ITEM_TYPE: CONTENT_TYPE.TEXT
}
],
'role': 'grpRole:main'}],
'firstcreated': utcnow(),
'expiry': utcnow() + timedelta(minutes=20),
'sequence': 2,
'state': 'draft',
'unique_name': '#7'},
{'guid': '8',
'_id': '8',
'last_version': 3,
config.VERSION: 4,
'targeted_for': [{'name': 'New South Wales', 'allow': True}],
'body_html': 'Take-1 body',
'urgency': 4,
'headline': 'Take-1 headline',
'abstract': 'Abstract for take-1',
'anpa_category': [{'qcode': 'A', 'name': 'Sport'}],
'pubstatus': 'done',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'dateline': {'located': {'city': 'Sydney'}},
'slugline': 'taking takes',
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.PROGRESS,
'expiry': utcnow() + timedelta(minutes=20),
ITEM_TYPE: CONTENT_TYPE.TEXT,
'unique_name': '#8'},
{'_id': '9',
'urgency': 3,
'headline': 'creator',
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.FETCHED},
{'guid': 'tag:localhost:2015:69b961ab-a7b402fed4fb',
'_id': 'test_item_9',
'last_version': 3,
config.VERSION: 4,
'body_html': 'Student Crime. Police Missing.',
'urgency': 4,
'headline': 'Police Missing',
'abstract': 'Police Missing',
'anpa_category': [{'qcode': 'A', 'name': 'Australian General News'}],
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'dateline': {'located': {'city': 'Sydney'}},
'slugline': 'Police Missing',
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_STATE: CONTENT_STATE.PROGRESS,
ITEM_TYPE: CONTENT_TYPE.TEXT,
'unique_name': '#9'},
{'guid': 'tag:localhost:10:10:10:2015:69b961ab-2816-4b8a-a584-a7b402fed4fc',
'_id': '100',
config.VERSION: 3,
'task': {'user': '1', 'desk': '123456789ABCDEF123456789'},
ITEM_TYPE: CONTENT_TYPE.COMPOSITE,
'groups': [{'id': 'root', 'refs': [{'idRef': 'main'}], 'role': 'grpRole:NEP'},
{'id': 'main',
'refs': [{'location': ARCHIVE, ITEM_TYPE: CONTENT_TYPE.COMPOSITE, RESIDREF: '6'}],
'role': 'grpRole:main'}],
'firstcreated': utcnow(),
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#100',
ITEM_STATE: CONTENT_STATE.PROGRESS}]
def setUp(self):
super().setUp()
self.init_data()
self.app.data.insert('users', self.users)
self.app.data.insert('desks', self.desks)
self.app.data.insert('products', self.products)
self.app.data.insert('subscribers', self.subscribers)
self.app.data.insert(ARCHIVE, self.articles)
self.filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), "validators.json")
self.json_data = [
{"_id": "kill_text", "act": "kill", "type": "text", "schema": {"headline": {"type": "string"}}},
{"_id": "publish_text", "act": "publish", "type": "text", "schema": {}},
{"_id": "correct_text", "act": "correct", "type": "text", "schema": {}},
{"_id": "publish_composite", "act": "publish", "type": "composite", "schema": {}},
]
self.article_versions = self._init_article_versions()
with open(self.filename, "w+") as file:
json.dump(self.json_data, file)
init_app(self.app)
ValidatorsPopulateCommand().run(self.filename)
self.app.media.url_for_media = MagicMock(return_value='url_for_media')
self.app.media.put = MagicMock(return_value='media_id')
def tearDown(self):
super().tearDown()
if self.filename and os.path.exists(self.filename):
os.remove(self.filename)
def _init_article_versions(self):
resource_def = self.app.config['DOMAIN']['archive_versions']
version_id = versioned_id_field(resource_def)
return [{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4f9',
version_id: '1',
ITEM_TYPE: CONTENT_TYPE.TEXT,
config.VERSION: 1,
'urgency': 4,
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
ITEM_STATE: CONTENT_STATE.DRAFT,
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#8'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4f9',
version_id: '1',
ITEM_TYPE: CONTENT_TYPE.TEXT,
config.VERSION: 2,
'urgency': 4,
'headline': 'Two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
ITEM_STATE: CONTENT_STATE.SUBMITTED,
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#8'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4f9',
version_id: '1',
ITEM_TYPE: CONTENT_TYPE.TEXT,
config.VERSION: 3,
'urgency': 4,
'headline': 'Two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
ITEM_STATE: CONTENT_STATE.PROGRESS,
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#8'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4f9',
version_id: '1',
ITEM_TYPE: CONTENT_TYPE.TEXT,
config.VERSION: 4,
'body_html': 'Test body',
'urgency': 4,
'headline': 'Two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article',
'dateline': {'located': {'city': 'Sydney'}},
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject': [{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
ITEM_STATE: CONTENT_STATE.PROGRESS,
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#8'}]
def _is_publish_queue_empty(self):
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(0, queue_items.count())
def _add_content_filters(self, product, is_global=False):
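        # Builds a content filter that matches headlines containing or ending
        # in "tor", or urgency values in {2} / {2, 3, 4}, and attaches it to
        # the given product as a blocking (or, in tests, permitting) filter.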
product['content_filter'] = {'filter_id': 1, 'filter_type': 'blocking'}
self.app.data.insert('filter_conditions',
[{'_id': 1,
'field': 'headline',
'operator': 'like',
'value': 'tor',
'name': 'test-1'}])
self.app.data.insert('filter_conditions',
[{'_id': 2,
'field': 'urgency',
'operator': 'in',
'value': '2',
'name': 'test-2'}])
self.app.data.insert('filter_conditions',
[{'_id': 3,
'field': 'headline',
'operator': 'endswith',
'value': 'tor',
'name': 'test-3'}])
self.app.data.insert('filter_conditions',
[{'_id': 4,
'field': 'urgency',
'operator': 'in',
'value': '2,3,4',
'name': 'test-4'}])
get_resource_service('content_filters').post([{'_id': 1, 'name': 'pf-1', 'is_global': is_global,
'content_filter': [{"expression": {"fc": [4, 3]}},
{"expression": {"fc": [1, 2]}}]
}])
def test_publish(self):
doc = self.articles[3].copy()
get_resource_service(ARCHIVE_PUBLISH).patch(id=doc['_id'], updates={ITEM_STATE: CONTENT_STATE.PUBLISHED})
published_doc = get_resource_service(ARCHIVE).find_one(req=None, _id=doc['_id'])
self.assertIsNotNone(published_doc)
self.assertEqual(published_doc[config.VERSION], doc[config.VERSION] + 1)
self.assertEqual(published_doc[ITEM_STATE], ArchivePublishService().published_state)
def test_versions_across_collections_after_publish(self):
self.app.data.insert('archive_versions', self.article_versions)
# Publishing an Article
doc = self.articles[7]
original = doc.copy()
published_version_number = original[config.VERSION] + 1
get_resource_service(ARCHIVE_PUBLISH).patch(id=doc[config.ID_FIELD],
updates={ITEM_STATE: CONTENT_STATE.PUBLISHED,
config.VERSION: published_version_number})
article_in_production = get_resource_service(ARCHIVE).find_one(req=None, _id=original[config.ID_FIELD])
self.assertIsNotNone(article_in_production)
self.assertEqual(article_in_production[ITEM_STATE], CONTENT_STATE.PUBLISHED)
self.assertEqual(article_in_production[config.VERSION], published_version_number)
enqueue_published()
lookup = {'item_id': original[config.ID_FIELD], 'item_version': published_version_number}
queue_items = list(get_resource_service(PUBLISH_QUEUE).get(req=None, lookup=lookup))
assert len(queue_items) > 0, \
"Transmission Details are empty for published item %s" % original[config.ID_FIELD]
lookup = {'item_id': original[config.ID_FIELD], config.VERSION: published_version_number}
request = ParsedRequest()
request.args = {'aggregations': 0}
items_in_published_collection = list(get_resource_service(PUBLISHED).get(req=request, lookup=lookup))
assert len(items_in_published_collection) > 0, \
"Item not found in published collection %s" % original[config.ID_FIELD]
def test_queue_transmission_for_item_scheduled_future(self):
self._is_publish_queue_empty()
doc = copy(self.articles[9])
doc['item_id'] = doc['_id']
schedule_date = utcnow() + timedelta(hours=2)
updates = {
'publish_schedule': schedule_date,
'schedule_settings': {
'utc_publish_schedule': schedule_date
}
}
get_resource_service(ARCHIVE).patch(id=doc['_id'], updates=updates)
get_resource_service(ARCHIVE_PUBLISH).patch(id=doc['_id'], updates={ITEM_STATE: CONTENT_STATE.SCHEDULED})
enqueue_published()
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(0, queue_items.count())
def test_queue_transmission_for_item_scheduled_elapsed(self):
self._is_publish_queue_empty()
doc = copy(self.articles[9])
doc['item_id'] = doc['_id']
schedule_date = utcnow() + timedelta(minutes=10)
updates = {
'publish_schedule': schedule_date,
'schedule_settings': {
'utc_publish_schedule': schedule_date
}
}
get_resource_service(ARCHIVE).patch(id=doc['_id'], updates=updates)
get_resource_service(ARCHIVE_PUBLISH).patch(id=doc['_id'], updates={ITEM_STATE: CONTENT_STATE.SCHEDULED})
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(0, queue_items.count())
schedule_in_past = utcnow() + timedelta(minutes=-10)
get_resource_service(PUBLISHED).update_published_items(doc['_id'], 'schedule_settings',
{'utc_publish_schedule': schedule_in_past})
get_resource_service(PUBLISHED).update_published_items(doc['_id'], 'publish_schedule', schedule_in_past)
enqueue_published()
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(5, queue_items.count())
def test_queue_transmission_for_digital_channels(self):
self._is_publish_queue_empty()
doc = copy(self.articles[1])
doc['item_id'] = doc['_id']
subscribers, subscribers_yet_to_receive, subscriber_codes = \
EnqueuePublishedService().get_subscribers(doc, SUBSCRIBER_TYPES.DIGITAL)
EnqueueService().queue_transmission(doc, subscribers, subscriber_codes)
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(2, queue_items.count())
expected_subscribers = ['3', '5']
for item in queue_items:
self.assertIn(item["subscriber_id"], expected_subscribers, 'item {}'.format(item))
def test_queue_transmission_for_wire_channels_with_codes(self):
self._is_publish_queue_empty()
doc = copy(self.articles[1])
doc['item_id'] = doc['_id']
subscribers, subscribers_yet_to_receive, subscriber_codes = \
EnqueuePublishedService().get_subscribers(doc, SUBSCRIBER_TYPES.WIRE)
EnqueueService().queue_transmission(doc, subscribers, subscriber_codes)
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(5, queue_items.count())
expected_subscribers = ['1', '2', '4', '5']
for item in queue_items:
self.assertIn(item['subscriber_id'], expected_subscribers, 'item {}'.format(item))
if item['subscriber_id'] == '5':
self.assertIn('def', item['codes'])
self.assertIn('abc', item['codes'])
def test_queue_transmission_wrong_article_type_fails(self):
self._is_publish_queue_empty()
doc = copy(self.articles[0])
doc['item_id'] = doc['_id']
doc[ITEM_TYPE] = CONTENT_TYPE.PICTURE
subscribers, subscribers_yet_to_receive, subscriber_codes = \
EnqueuePublishedService().get_subscribers(doc, SUBSCRIBER_TYPES.DIGITAL)
no_formatters, queued = EnqueueService().queue_transmission(doc, subscribers, subscriber_codes)
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(1, queue_items.count())
self.assertEqual(1, len(no_formatters))
self.assertTrue(queued)
subscribers, subscribers_yet_to_receive, subscriber_codes = \
EnqueuePublishedService().get_subscribers(doc, SUBSCRIBER_TYPES.WIRE)
        no_formatters, queued = EnqueueService().queue_transmission(doc, subscribers, subscriber_codes)
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(2, queue_items.count())
self.assertEqual(0, len(no_formatters))
self.assertTrue(queued)
def test_delete_from_queue_by_article_id(self):
self._is_publish_queue_empty()
doc = copy(self.articles[7])
doc['item_id'] = doc['_id']
archive_publish = get_resource_service(ARCHIVE_PUBLISH)
archive_publish.patch(id=doc['_id'], updates={ITEM_STATE: CONTENT_STATE.PUBLISHED})
enqueue_published()
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(4, queue_items.count())
        # This deletes the queued transmissions for the wire article, but not for the takes package.
publish_queue.PublishQueueService(PUBLISH_QUEUE, superdesk.get_backend()).delete_by_article_id(doc['_id'])
self._is_publish_queue_empty()
def test_can_publish_article(self):
product = self.products[0]
self._add_content_filters(product, is_global=False)
can_it = EnqueueService().conforms_content_filter(product, self.articles[8])
self.assertFalse(can_it)
product['content_filter']['filter_type'] = 'permitting'
can_it = EnqueueService().conforms_content_filter(product, self.articles[8])
self.assertTrue(can_it)
product.pop('content_filter')
def test_can_publish_article_with_global_filters(self):
subscriber = self.subscribers[0]
product = self.products[0]
self._add_content_filters(product, is_global=True)
service = get_resource_service('content_filters')
req = ParsedRequest()
req.args = {'is_global': True}
global_filters = service.get(req=req, lookup=None)
can_it = EnqueueService().conforms_global_filter(subscriber, global_filters, self.articles[8])
self.assertFalse(can_it)
subscriber['global_filters'] = {'1': False}
can_it = EnqueueService().conforms_global_filter(subscriber, global_filters, self.articles[8])
self.assertTrue(can_it)
product.pop('content_filter')
def test_targeted_for_excludes_digital_subscribers(self):
ValidatorsPopulateCommand().run(self.filename)
updates = {'targeted_for': [{'name': 'New South Wales', 'allow': True}]}
doc_id = self.articles[9][config.ID_FIELD]
get_resource_service(ARCHIVE).patch(id=doc_id, updates=updates)
get_resource_service(ARCHIVE_PUBLISH).patch(id=doc_id, updates={ITEM_STATE: CONTENT_STATE.PUBLISHED})
enqueue_published()
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(4, queue_items.count())
expected_subscribers = ['1', '2', '4']
for item in queue_items:
self.assertIn(item["subscriber_id"], expected_subscribers, 'item {}'.format(item))
def test_maintain_latest_version_for_published(self):
def get_publish_items(item_id, last_version):
query = {'query': {'filtered': {'filter': {'and': [
{'term': {'item_id': item_id}}, {'term': {LAST_PUBLISHED_VERSION: last_version}}
]}}}}
request = ParsedRequest()
request.args = {'source': json.dumps(query), 'aggregations': 0}
return self.app.data.find(PUBLISHED, req=request, lookup=None)
ValidatorsPopulateCommand().run(self.filename)
get_resource_service(ARCHIVE).patch(id=self.articles[1][config.ID_FIELD],
updates={'publish_schedule': None})
doc = get_resource_service(ARCHIVE).find_one(req=None, _id=self.articles[1][config.ID_FIELD])
get_resource_service(ARCHIVE_PUBLISH).patch(id=doc[config.ID_FIELD],
updates={ITEM_STATE: CONTENT_STATE.PUBLISHED})
enqueue_published()
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(7, queue_items.count())
request = ParsedRequest()
request.args = {'aggregations': 0}
published_items = self.app.data.find(PUBLISHED, request, None)
self.assertEqual(2, published_items.count())
published_digital_doc = next((item for item in published_items
if item.get(PACKAGE_TYPE) == TAKES_PACKAGE), None)
published_doc = next((item for item in published_items
if item.get('item_id') == doc[config.ID_FIELD]), None)
self.assertEqual(published_doc[LAST_PUBLISHED_VERSION], True)
self.assertEqual(published_digital_doc[LAST_PUBLISHED_VERSION], True)
get_resource_service(ARCHIVE_CORRECT).patch(id=doc[config.ID_FIELD],
updates={ITEM_STATE: CONTENT_STATE.CORRECTED})
enqueue_published()
queue_items = self.app.data.find(PUBLISH_QUEUE, None, None)
self.assertEqual(14, queue_items.count())
published_items = self.app.data.find(PUBLISHED, request, None)
self.assertEqual(4, published_items.count())
last_published_digital = get_publish_items(published_digital_doc['item_id'], True)
self.assertEqual(1, last_published_digital.count())
last_published = get_publish_items(published_doc['item_id'], True)
self.assertEqual(1, last_published.count())
def test_added_removed_in_a_package(self):
package = {"groups": [{"id": "root", "refs": [{"idRef": "main"}], "role": "grpRole:NEP"},
{"id": "main", "refs": [
{
"renditions": {},
"slugline": "Boat",
"guid": "123",
"headline": "item-1 headline",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "123"
},
{
"renditions": {},
"slugline": "Boat",
"guid": "456",
"headline": "item-2 headline",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "456"
},
{
"renditions": {},
"slugline": "Boat",
"guid": "789",
"headline": "item-3 headline",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "789"
}], "role": "grpRole:main"}],
"task": {
"user": "#CONTEXT_USER_ID#",
"status": "todo",
"stage": "#desks.incoming_stage#",
"desk": "#desks._id#"},
"guid": "compositeitem",
"headline": "test package",
"state": "submitted",
"type": "composite"}
updates = {"groups": [{"id": "root", "refs": [{"idRef": "main"}], "role": "grpRole:NEP"},
{"id": "main", "refs": [
{
"renditions": {},
"slugline": "Boat",
"guid": "123",
"headline": "item-1 headline",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "123"
},
{
"renditions": {},
"slugline": "Boat",
"guid": "555",
"headline": "item-2 headline",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "555"
},
{
"renditions": {},
"slugline": "Boat",
"guid": "456",
"headline": "item-2 headline",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "456"
}], "role": "grpRole:main"}],
"task": {
"user": "#CONTEXT_USER_ID#",
"status": "todo",
"stage": "#desks.incoming_stage#",
"desk": "#desks._id#"},
"guid": "compositeitem",
"headline": "test package",
"state": "submitted",
"type": "composite"}
items = PackageService().get_residrefs(package)
removed_items, added_items = ArchivePublishService()._get_changed_items(items, updates)
self.assertEqual(len(removed_items), 1)
self.assertEqual(len(added_items), 1)
def test_publish_associations(self):
item = {
'associations': {
'sidebar': {
'headline': 'foo',
'pubstatus': 'canceled',
},
'image': {
'pubstatus': 'usable',
'headline': 'bar',
'fetch_endpoint': 'paimg',
'renditions': {
'original': {
'href': 'https://c2.staticflickr.com/4/3665/9203816834_3329fac058_t.jpg',
'width': 100,
'height': 67,
'mimetype': 'image/jpeg'
},
'thumbnail': {
'CropLeft': 10,
'CropRight': 50,
'CropTop': 10,
'CropBottom': 40,
}
}
}
}
}
thumbnail_crop = {'width': 40, 'height': 30}
with patch.object(CropService, 'get_crop_by_name', return_value=thumbnail_crop):
ArchivePublishService()._publish_associations(item, 'baz')
self.assertNotIn('sidebar', item['associations'])
self.assertIn('image', item['associations'])
image = item['associations']['image']
renditions = image['renditions']
self.assertEqual(40, renditions['thumbnail']['width'])
self.assertEqual(30, renditions['thumbnail']['height'])
self.assertEqual('image/jpeg', renditions['thumbnail']['mimetype'])
self.assertEqual('url_for_media', renditions['thumbnail']['href'])
| agpl-3.0 | -3,178,256,074,282,106,000 | 52.38785 | 120 | 0.445055 | false |
3dfxsoftware/cbss-addons | mrp_variation/__openerp__.py | 1 | 1832 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# [email protected]
############################################################################
# Coded by: julio ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "MRP Variation",
"version": "1.1",
"author" : "Vauxoo",
"category": "Generic Modules/MRP",
"website" : "http://www.vauxoo.com/",
"description": """ Add table to veiw variation
""",
'depends': ['mrp_consume_produce','mrp_pt_planified'],
'init_xml': [],
'update_xml': [
'mrp_variation_view.xml',
'security/mrp_variation_security.xml',
'security/ir.model.access.csv',
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 | -3,574,338,321,294,866,000 | 37.978723 | 78 | 0.543122 | false |
allison-group/indigo-bondorder | archive/src/indigox/astar.py | 1 | 9383 | from heapq import heappop, heappush
from itertools import count as _count
from itertools import product
from time import perf_counter
from indigox.config import (INFINITY, BASIS_LEVEL, TIMEOUT, HEURISTIC,
COUNTERPOISE_CORRECTED, ELECTRON_PAIRS,
INITIAL_LO_ENERGY)
from indigox.data import atom_enes, bond_enes
from indigox.exception import IndigoMissingParameters
from indigox.lopt import LocalOptimisation
from indigox.misc import (graph_to_dist_graph, electron_spots, electrons_to_add,
locs_sort, BondOrderAssignment, graph_setup, HashBitArray,
node_energy, bitarray_to_assignment, calculable_nodes)
BSSE = int(not COUNTERPOISE_CORRECTED)
class AStar(BondOrderAssignment):
def __init__(self, G):
self.init_G = G
def initialise(self):
if HEURISTIC.lower() == 'tight':
self.heuristic = abstemious
elif HEURISTIC.lower() == 'loose':
self.heuristic = promiscuous
else:
raise IndigoMissingParameters('Unknown A* heuristic type: {}'
''.format(HEURISTIC))
self.h_count = 0
self.c_count = 0
self.G = graph_to_dist_graph(self.init_G)
self.target = electrons_to_add(self.init_G)
self.locs = locs_sort(electron_spots(self.init_G), self.G)
self.choices = []
for n in self.locs:
n_count = self.locs.count(n)
if (n,n_count) not in self.choices:
self.choices.append((n,n_count))
for i in range(len(self.choices)):
self.choices[i] = self.choices[i][1]
if not INITIAL_LO_ENERGY:
self.max_queue_energy = INFINITY / 2
else:
lo = LocalOptimisation(self.init_G)
_, self.max_queue_energy = lo.run()
def run(self):
self.start_time = perf_counter()
push = heappush
pop = heappop
c = _count()
self.initialise()
source = HashBitArray(len(self.locs))
source.setall(False)
i_count = 0
explored_count = 0;
enqueued_count = 0;
start = 0
try:
stop = self.choices[0]
except IndexError:
stop = 0
child = 1
always_calculable = calculable_nodes(self.G, source, 0, self.locs,
self.target)
q = [(0, next(c), (source, 0), start, stop, child,
self.calc_energy(source, always_calculable, stop), None)]
enqueued = {}
explored = {}
while q:
qcost, _, curvert, start, stop, child, dist, parent = pop(q)
i_count += 1
if i_count < 20:
print(curvert, start, stop)
# print(curvert[0])
if stop >= len(self.locs) and curvert[0].count() == self.target:
bitarray_to_assignment(self.init_G, curvert[0], self.locs)
print(i_count + len(q), "items passed through queue")
print("Explored:", explored_count, "Enqueued:", enqueued_count)
print("{:.3f} seconds run time".format(perf_counter()-self.start_time))
return self.init_G, dist
# if curvert in explored:
# explored_explored += 1
# continue
# explored[curvert] = parent
for n in self.neighbours(curvert[0], start, stop):
if i_count < 20:
print(" ",n)
# if n in explored:
# explored_count += 1
# continue
calculable = calculable_nodes(self.G, n[0], stop, self.locs,
self.target)
ncost = self.calc_energy(n[0], calculable, stop)
# if n in enqueued:
# enqueued_count += 1
# qcost, h = enqueued[n]
# if qcost <= ncost:
# continue
# else:
# self.h_count += 1
h = self.heuristic(self.G, n[0], calculable, stop,
self.target, self.locs)
if ncost + h > self.max_queue_energy:
continue
# enqueued[n] = ncost, h
try:
push(q, (ncost + h, next(c), n, stop, stop + self.choices[child],
child + 1, ncost, curvert))
except IndexError:
push(q, (ncost + h, next(c), n, stop, stop + 1,child, ncost,
curvert))
print(i_count, "items passed through queue")
print("{:.3f} seconds run time".format(perf_counter()-self.start_time))
def neighbours(self, a, start, stop):
num = stop - start
for i in range(num + 1):
b = HashBitArray(a.to01())
j = 0
while j < i:
b[start + j] = True
j += 1
yield b, stop
def calc_energy(self, a, calculable, stop, g_info=None):
self.c_count += 1
placed = a[:stop].count()
to_place = self.target - placed
available_places = a.length() - stop - to_place
if to_place < 0 or available_places < 0:
return INFINITY
if g_info is None:
graph_setup(self.G, a, self.locs)
else:
for n in self.G:
self.G.node[n]['e-'] = g_info[n]['e-']
if len(n) == 1:
self.G.node[n]['fc'] = g_info[n]['fc']
ene = sum(node_energy(self.G, n) for n in calculable)
if ene > INFINITY / 2:
ene = INFINITY
return round(ene, 5)
# Heuristics
def promiscuous(G, a, calculable, stop, target, locs):
h_ene = 0
placed = a[:stop].count()
to_place = target - placed
if not to_place:
return h_ene
if to_place < 0:
return INFINITY
# Doesn't worry about charged bonds
a_enes = atom_enes[BASIS_LEVEL]
b_enes = bond_enes[BASIS_LEVEL]
for n in G:
if n in calculable:
continue
if len(n) == 1:
h_ene += min(a_enes[G.node[n]['Z']].values())
elif len(n) == 2:
a_element = G.node[(n[0],)]['Z']
b_element = G.node[(n[1],)]['Z']
if b_element < a_element:
a_element, b_element = b_element, a_element
min_ene = 0
for o in (1,2,3):
try:
o_ene = b_enes[(a_element, b_element, o)][BSSE]
except KeyError:
continue
if o_ene < min_ene:
min_ene = o_ene
h_ene += min_ene
return h_ene
def abstemious(G, a, calculable, stop, target, locs):
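    # Tight heuristic: bounds each remaining node's energy using only the
    # formal charges and bond orders still reachable with the electrons left
    # to place, so it prunes the queue harder than the loose variant.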
h_ene = 0
placed = a[:stop].count()
to_place = target - placed
if not to_place:
return h_ene
if to_place < 0:
return INFINITY
extra_counts = {k:locs.count(k) for k in set(locs)}
extra_able = set(locs[stop:])
graph_setup(G, a, locs)
# note where all the extra electrons can go
for n in G:
G.node[n]['h'] = 0
if n not in extra_able:
continue
added = 0
while added < to_place and added < extra_counts[n]:
if ELECTRON_PAIRS:
G.node[n]['h'] += 2
else:
G.node[n]['h'] += 1
added += 1
# figure out the lowest possible energy attainable for each node
for n in sorted(G, key=len):
# atoms formal charges
if len(n) == 1:
addable = []
step = 2 if ELECTRON_PAIRS else 1
addable.append(range(0, G.node[n]['h'] + 1, step))
for nb in G[n]:
addable.append(range(0, G.node[nb]['h'] // 2 + 1))
fcs = set()
for x in product(*addable):
real_sum = (x[0]//2 + sum(x[1:]) if ELECTRON_PAIRS
else x[0] + 2 * sum(x[1:]))
if real_sum <= to_place:
fcs.add((G.node[n]['fc'] - sum(x), real_sum))
G.node[n]['poss_fcs'] = fcs
# need all possible formal charges for all atoms
if n in calculable:
continue
fcs = set(x[0] for x in fcs)
a_enes = atom_enes[BASIS_LEVEL][G.node[n]['Z']]
try:
h_ene += min(v for k, v in a_enes.items() if k in fcs)
except ValueError:
h_ene = INFINITY
if len(n) == 2:
if n in calculable:
continue
step = 2
bos = {G.node[n]['e-'] + x
for x in range(0, G.node[n]['h'] + 1, step)}
a_ele = G.node[(n[0],)]['Z']
b_ele = G.node[(n[1],)]['Z']
if b_ele < a_ele:
a_ele, b_ele = b_ele, a_ele
b_enes = bond_enes[BASIS_LEVEL]
h_ene += min(b_enes[(a_ele, b_ele, o//2)][BSSE] for o in bos)
return h_ene
| mit | 7,778,853,065,744,993,000 | 34.950192 | 87 | 0.470638 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kio/KUriFilter.py | 1 | 1212 | # encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python2.7/dist-packages/PyKDE4/kio.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KUriFilter(): # skipped bases: <type 'sip.wrapper'>
# no doc
def filteredUri(self, *args, **kwargs): # real signature unknown
pass
def filterSearchUri(self, *args, **kwargs): # real signature unknown
pass
def filterUri(self, *args, **kwargs): # real signature unknown
pass
def loadPlugins(self, *args, **kwargs): # real signature unknown
pass
def pluginNames(self, *args, **kwargs): # real signature unknown
pass
def self(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
NormalTextFilter = 1
SearchFilterType = None # (!) real value is ''
SearchFilterTypes = None # (!) real value is ''
WebShortcutFilter = 2
| gpl-2.0 | 3,505,578,454,334,459,000 | 25.933333 | 101 | 0.648515 | false |
albertoibm/Thesis | Archivo2.py | 1 | 3593 | import libardrone
import numpy
from time import time,sleep
from sys import stdout,exit,argv
from math import sqrt,pi
from PID import PID
import diffOrdN
GRADOS=pi/180
class Integral:
""" Calcula la integral de una funcion dada como parametro"""
def __init__(self,f):
self.f=f
self.integral=0.
self.val=0.
def update(self,x,dt):
self.integral+=(self.f(x)+self.val)/2*dt
self.val=self.f(x)
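# Sketch of usage (assuming a positive input signal): with dt=0.1, the first
# update of Integral(lambda x: numpy.sign(x)) advances the integral by
# (1 + 0)/2 * 0.1 = 0.05.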
ttotal=10 ## Total flight time
factor=1
### Controller parameters
delta=numpy.matrix(numpy.diag([12,12,8]))
K1=4.5*numpy.sqrt(delta)
K2=1.1*delta
lamda=numpy.matrix(numpy.diag([3,3,3]))
### Quadcopter parameters
l=0.13
Ixx=24.1e-3
Iyy=23.2e-3
Izz=45.1e-2
b=0.0006646195542576290
b=0.000064601020673842 # overrides the value above
d=b*9.72
## Inertia matrix and its inverse
g=numpy.matrix(numpy.diag([l/Ixx,l/Iyy,1/Izz]))
ginv=g.I
## Vectors for the desired x and its derivative
xd=numpy.array([[0],[0],[0]])
xpd=numpy.array([[0],[0],[0]])
Altd=260
## Numerical sliding-mode differentiator object
difN = 4 ## Differentiator order
dif = diffOrdN.DiffOrdN(difN,[12,8,4,3.5,2.1])
## Object that computes the integral of the sign function
intsgn=Integral(lambda x:numpy.sign(x))
## Altitude controller
ctrlalt=PID(.7,.2,.1)
### Establish communication with the ARDrone, shut off the motors and change the camera mode
stdout.write("Establishing communication with the ARDrone\n")
stdout.flush()
drone=libardrone.ARDrone()
sleep(1)
print "Ready!"
stdout.write("Applying initial configuration\n")
stdout.flush()
drone.reset()
sleep(0.1)
drone.trim()
sleep(1.5)
drone.reset()
print "Encendiendo motores"
drone.pwm(1,1,1,1)
sleep(5)
drone.zap(libardrone.ZAP_CHANNEL_LARGE_HORI_SMALL_VERT)
sleep(0.1)
print "Listo!"
## Vectors to store flight data
angs=[]
us=[]
oms=[]
ts=[]
## set the initial time
ta=time()
t0=ta
xa=0
while time()-t0<ttotal:
dt = -ta + time()
ta = time()
    Alt = 260  # altitude read disabled: drone.navdata[0]['altitude']
Th = drone.navdata[0]['theta']*GRADOS
Ph = drone.navdata[0]['phi']*GRADOS
Ps = drone.navdata[0]['psi']*GRADOS
x = numpy.matrix([[Th],[Ph],[Ps]])
dif.update(x,dt)
o = dif.output()
x = o[difN]
xp = o[difN-1]
# xp = (x - xa)/dt
xa = x+0
e = xd-x
ep = xpd-xp
s = ep+lamda*e
intsgn.update(s,dt)
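    # Super-twisting sliding-mode law:
    #   u = -lambda*ep - K1*sqrt(|s|)*sign(s) - K2*integral(sign(s))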
u = -lamda*ep-\
K1*numpy.matrix(numpy.array(numpy.sqrt(numpy.abs(s)))*numpy.array(numpy.sign(s)))\
-K2*intsgn.integral
u = ginv*u
om1=om2=om3=om4 = 0
U4 = max(0,ctrlalt.sal(Altd-Alt,dt))
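    # Invert the quadrotor mixing: solve each rotor speed from the collective
    # thrust U4 and the torque commands u; sqrt() of a negative argument
    # raises, so the corresponding rotor speed stays 0 via try/except.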
try:om1=int(round(sqrt(-(b*u[2]+d*u[0]-d*U4+d*u[1])/(4*b*d))*factor))
except:pass
try:om2=int(round(sqrt((-d*u[0]+d*u[1]+b*u[2]+d*U4)/(4*b*d))*factor))
except:pass
try:om3=int(round(sqrt(-(-d*u[1]+b*u[2]-d*U4-d*u[0])/(4*b*d))*factor))
except:pass
try:om4=int(round(sqrt((d*U4+b*u[2]+d*u[0]-d*u[1])/(4*b*d))*factor))
except:pass
om1=10+om1 if om1<10 else om1
om2=10+om2 if om2<10 else om2
om3=10+om3 if om3<10 else om3
om4=10+om4 if om4<10 else om4
stdout.write("\b"*100+"(%.2f,%.2f,%.2f,%.2f)"%(U4,u[0],u[1],u[2]))
stdout.write("\b"*0+"|[%.2f,%.2f,%.2f,%.2f]"%(om1,om2,om3,om4))
stdout.write("{%.4f} "%dt)
stdout.flush()
if "-p" not in argv:
drone.pwm(om1,om2,om3,om4)
angs.append([x[0][0],x[1][0],x[2][0]]) ## Th,Ph,Ps
us.append([U4,u[0],u[1],u[2]])
oms.append([om1,om2,om3,om4])
ts.append(time()-t0)
drone.pwm(0,0,0,0)
drone.halt()
print ""
archivo=open("res_supertwisting.txt",'w')
for i in range(len(ts)):
archivo.write("%.3f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\n"%\
(ts[i],angs[i][0],angs[i][1],us[i][0],us[i][1],us[i][2],\
us[i][3],oms[i][1],oms[i][2],oms[i][3]))
archivo.close()
exit()
| gpl-2.0 | 8,193,664,117,015,958,000 | 23.442177 | 98 | 0.651266 | false |
drusk/fishcounter | fishcounter/tracking/camshift.py | 1 | 3739 | # Copyright (C) 2013 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tracking based on the Camshift algorithm.
"""
import cv2
import numpy as np
from fishcounter.segment import HSVColourSegmenter
class CamShiftTracker(object):
"""
Uses colour information to track fish regardless of whether they are
moving or not.
"""
def __init__(self):
self.mask_detector = HSVColourSegmenter()
def track(self, current_image, frame_number, moving_objects, stationary_objects):
hsv = cv2.cvtColor(current_image, cv2.COLOR_BGR2HSV)
mask = self.mask_detector.segment(current_image)
for obj in stationary_objects:
bbox = obj.bbox
if bbox.has_negative_area:
print "BBOX has negative area: %s" % bbox
continue
hsv_roi = hsv[bbox.y0:bbox.y1, bbox.x0:bbox.x1]
mask_roi = mask[bbox.y0:bbox.y1, bbox.x0:bbox.x1]
bin_range = [self.mask_detector.hue_min, self.mask_detector.hue_max]
hist = cv2.calcHist([hsv_roi], # source image(s)
[0], # channels to use - just Hue
mask_roi, # mask which source pixels to count
[16], # number of bins
bin_range # first bin min, last bin max
)
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
hist = hist.reshape(-1)
prob = cv2.calcBackProject([hsv], # input image
[0], # channels to use - just Hue
hist, # histogram
bin_range, # first bin min, last bin max
1 # scale factor
)
prob &= mask
stop_criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,
10, # max iterations
1 # desired accuracy of window center
)
# track_box also contains rotation information which we are not using right now
track_box, track_window = cv2.CamShift(prob, bbox.cv2rect, stop_criteria)
prev_center = bbox.center
bbox.update(track_window)
obj.last_frame_tracked = frame_number
new_center = bbox.center
displacement = np.sqrt(np.square(prev_center[0] - new_center[0]) +
np.square(prev_center[1] - new_center[1]))
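            # Heuristic: a centre displacement above ~6 px between frames
            # reclassifies the fish as moving.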
if displacement > 6:
stationary_objects.remove(obj)
moving_objects.append(obj)
return moving_objects, stationary_objects
| mit | -920,504,979,399,676,300 | 39.641304 | 91 | 0.598556 | false |
1200wd/1200wd_addons | project_report/__openerp__.py | 1 | 1674 | # -*- coding: utf-8 -*-
##############################################################################
#
# Project Reports
# Copyright (C) 2016 January
# 1200 Web Development
# http://1200wd.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Project Reports",
    'summary': """Create reports of Project Tasks and Issues""",
    'description': """
        Create reports of Project Tasks and Issues
    """,
'author': "1200 Web Development",
'website': "http://1200wd.com",
'category': 'Project',
'version': '8.0.1.1',
'depends': [
'project',
'project_issue',
],
'data': [
'views/project_view.xml',
'reports/project_report.xml',
'reports/project_task_report.xml',
'reports/project_issue_report.xml',
],
'price': 10.00,
'currency': 'EUR',
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
}
| agpl-3.0 | -670,740,339,571,089,000 | 32.48 | 78 | 0.572282 | false |
bbangert/retools | retools/tests/test_limiter.py | 1 | 5913 | # coding: utf-8
import unittest
import time
import redis
from nose.tools import eq_
from mock import Mock
from mock import patch
from retools.limiter import Limiter
from retools import global_connection
class TestLimiterWithMockRedis(unittest.TestCase):
def test_can_create_limiter_without_prefix_and_without_connection(self):
limiter = Limiter(limit=10)
eq_(limiter.redis, global_connection.redis)
eq_(limiter.limit, 10)
eq_(limiter.prefix, 'retools_limiter')
def test_can_create_limiter_without_prefix(self):
mock_redis = Mock(spec=redis.Redis)
limiter = Limiter(limit=10, redis=mock_redis)
eq_(limiter.redis, mock_redis)
eq_(limiter.prefix, 'retools_limiter')
def test_can_create_limiter_with_prefix(self):
mock_redis = Mock(spec=redis.Redis)
limiter = Limiter(limit=10, redis=mock_redis, prefix='something')
eq_(limiter.redis, mock_redis)
eq_(limiter.prefix, 'something')
def test_can_create_limiter_with_expiration(self):
mock_redis = Mock(spec=redis.Redis)
limiter = Limiter(limit=10, redis=mock_redis, expiration_in_seconds=20)
eq_(limiter.expiration_in_seconds, 20)
def test_has_limit(self):
mock_time = Mock()
mock_time.return_value = 40.5
mock_redis = Mock(spec=redis.Redis)
mock_redis.zcard.return_value = 0
limiter = Limiter(limit=10, redis=mock_redis, expiration_in_seconds=20)
with patch('time.time', mock_time):
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
mock_redis.zadd.assert_called_once_with('retools_limiter', 'test1', 60.5)
def test_acquire_limit_after_removing_items(self):
mock_time = Mock()
mock_time.return_value = 40.5
mock_redis = Mock(spec=redis.Redis)
mock_redis.zcard.side_effect = [10, 8]
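        # side_effect models the sorted set shrinking from 10 to 8 entries
        # once expired members are pruned, so the limit can be acquired.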
limiter = Limiter(limit=10, redis=mock_redis, expiration_in_seconds=20)
with patch('time.time', mock_time):
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
mock_redis.zadd.assert_called_once_with('retools_limiter', 'test1', 60.5)
mock_redis.zremrangebyscore.assert_called_once_with('retools_limiter', '-inf', 40.5)
def test_acquire_limit_fails_even_after_removing_items(self):
mock_time = Mock()
mock_time.return_value = 40.5
mock_redis = Mock(spec=redis.Redis)
mock_redis.zcard.side_effect = [10, 10]
limiter = Limiter(limit=10, redis=mock_redis, expiration_in_seconds=20)
with patch('time.time', mock_time):
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, False)
eq_(mock_redis.zadd.called, False)
mock_redis.zremrangebyscore.assert_called_once_with('retools_limiter', '-inf', 40.5)
def test_release_limit(self):
mock_redis = Mock(spec=redis.Redis)
limiter = Limiter(limit=10, redis=mock_redis, expiration_in_seconds=20)
limiter.release_limit(key='test1')
mock_redis.zrem.assert_called_once_with('retools_limiter', 'test1')
class TestLimiterWithActualRedis(unittest.TestCase):
def test_has_limit(self):
limiter = Limiter(prefix='test-%.6f' % time.time(), limit=2, expiration_in_seconds=400)
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test2')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test3')
eq_(has_limit, False)
def test_has_limit_after_removing_items(self):
limiter = Limiter(prefix='test-%.6f' % time.time(), limit=2, expiration_in_seconds=400)
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
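        # expiration_in_seconds=-1 scores the entry in the past, so it is
        # pruned on the next acquire, freeing a slot for test3.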
has_limit = limiter.acquire_limit(key='test2', expiration_in_seconds=-1)
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test3')
eq_(has_limit, True)
def test_has_limit_after_releasing_items(self):
limiter = Limiter(prefix='test-%.6f' % time.time(), limit=2, expiration_in_seconds=400)
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test2')
eq_(has_limit, True)
limiter.release_limit(key='test2')
has_limit = limiter.acquire_limit(key='test3')
eq_(has_limit, True)
class TestLimiterWithStrictRedis(unittest.TestCase):
def setUp(self):
self.redis = redis.StrictRedis()
def test_has_limit(self):
limiter = Limiter(prefix='test-%.6f' % time.time(), limit=2, expiration_in_seconds=400, redis=self.redis)
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test2')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test3')
eq_(has_limit, False)
def test_has_limit_after_removing_items(self):
limiter = Limiter(prefix='test-%.6f' % time.time(), limit=2, expiration_in_seconds=400, redis=self.redis)
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test2', expiration_in_seconds=-1)
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test3')
eq_(has_limit, True)
def test_has_limit_after_releasing_items(self):
limiter = Limiter(prefix='test-%.6f' % time.time(), limit=2, expiration_in_seconds=400, redis=self.redis)
has_limit = limiter.acquire_limit(key='test1')
eq_(has_limit, True)
has_limit = limiter.acquire_limit(key='test2')
eq_(has_limit, True)
limiter.release_limit(key='test2')
has_limit = limiter.acquire_limit(key='test3')
eq_(has_limit, True)
| mit | -4,524,395,552,771,372,500 | 30.962162 | 113 | 0.637409 | false |